##// END OF EJS Templates
exchange: switch to usual way of testing for bundle2-ness...
Martin von Zweigbergk -
r32891:7e2eb964 default
parent child Browse files
Show More
@@ -1,2009 +1,2009 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 bookmarks as bookmod,
19 bookmarks as bookmod,
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 lock as lockmod,
24 lock as lockmod,
25 obsolete,
25 obsolete,
26 phases,
26 phases,
27 pushkey,
27 pushkey,
28 scmutil,
28 scmutil,
29 sslutil,
29 sslutil,
30 streamclone,
30 streamclone,
31 url as urlmod,
31 url as urlmod,
32 util,
32 util,
33 )
33 )
34
34
# Shorthand aliases for the urllib compatibility wrappers exposed by util.
urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
47
47
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into (version, params dict); both
        # keys and values are URI decoded.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Full "<compression>-<version>" form: validate both halves.
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Translate human-friendly names into internal wire identifiers.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
169
169
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the header of a bundle stream and return a matching unbundler.

    ``fh`` is a file object positioned at the start of the bundle data;
    ``fname`` is only used in error messages ("stream" when empty). When
    ``vfs`` is provided, ``fname`` is joined to the vfs root for reporting.

    Returns a ``changegroup.cg1unpacker`` (HG10), a bundle2 unbundler
    (HG2x), or a ``streamclone.streamcloneapplier`` (HGS1). Raises
    ``error.Abort`` when the data is not a recognized Mercurial bundle.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # Raw headerless changegroup data: push the peeked bytes back and
        # treat the stream as an uncompressed HG10 bundle.
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 carries a 2-byte compression type right after the magic,
        # unless we already forced 'UN' for headerless data above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
197
197
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map an internal compression type (e.g. 'BZ') to its bundlespec
        # name; returns None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    # Dispatch on the unbundler type produced by readbundle().
    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # Internal marker for a BZ stream whose header was consumed.
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        # Find the changegroup part to learn the changegroup version.
        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
250
250
def _computeoutgoing(repo, heads, common):
    """Compute which revs are outgoing given common nodes and a set of heads.

    This is kept as a standalone function so extensions can reuse the
    logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        # Nothing known in common: everything since the null revision.
        common = [nullid]
    else:
        # Discard common nodes the local changelog does not actually have.
        known = changelog.hasnode
        common = [node for node in common if known(node)]
    # An empty/absent heads argument means "all local heads".
    return discovery.outgoing(repo, common, heads or changelog.heads())
269
269
270 def _forcebundle1(op):
270 def _forcebundle1(op):
271 """return true if a pull/push must use bundle1
271 """return true if a pull/push must use bundle1
272
272
273 This function is used to allow testing of the older bundle version"""
273 This function is used to allow testing of the older bundle version"""
274 ui = op.repo.ui
274 ui = op.repo.ui
275 forcebundle1 = False
275 forcebundle1 = False
276 # The goal is this config is to allow developer to choose the bundle
276 # The goal is this config is to allow developer to choose the bundle
277 # version used during exchanged. This is especially handy during test.
277 # version used during exchanged. This is especially handy during test.
278 # Value is a list of bundle version to be picked from, highest version
278 # Value is a list of bundle version to be picked from, highest version
279 # should be used.
279 # should be used.
280 #
280 #
281 # developer config: devel.legacy.exchange
281 # developer config: devel.legacy.exchange
282 exchange = ui.configlist('devel', 'legacy.exchange')
282 exchange = ui.configlist('devel', 'legacy.exchange')
283 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
283 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
284 return forcebundle1 or not op.remote.capable('bundle2')
284 return forcebundle1 or not op.remote.capable('bundle2')
285
285
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        """Record the parameters of a push and initialize result state."""
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
401
401
# mapping of message used when pushing bookmark
# Each entry maps an action name to a (success message, failure message)
# pair; both messages take the bookmark name via %s.
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
410
410
411
411
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        # Pushing to a repo on the local filesystem: refuse early if the
        # destination lacks features this repository requires.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # A local transaction collects any changes the remote's reply
            # bundle makes to this repository (bookmarks, phases, ...).
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # No unbundle capability: fall back to addchangegroup, which
            # requires locking the remote repository ourselves.
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if not _forcebundle1(pushop):
                _pushbundle2(pushop)
            # The bundle1 steps below check pushop.stepsdone and become
            # no-ops for anything bundle2 already handled.
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        # Release in reverse acquisition order: transaction, lock, wlock.
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
496
496
# Names of the discovery steps to run before a push, in execution order.
pushdiscoveryorder = []

# step name -> function implementing that discovery step
#
# Kept separate from the order list so extensions can wrap individual
# steps if necessary.
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for a function performing discovery before push

    Registers the decorated function under ``stepname`` and appends the
    name to the ordered list of steps. Beware that decorated functions
    are added in decoration order (this may matter).

    Only use this decorator for a brand new step; to wrap a step from an
    extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # a step name must be registered at most once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
520
520
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
526
526
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """figure out which changesets need to be pushed

    Stores the outgoing set, the remote heads and the incoming count on
    the push operation object."""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
539
539
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and the failure case of the changeset
    push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    pheads, droots = phases.analyzeremotephases(pushop.repo,
                                                pushop.fallbackheads,
                                                remotephases)
    # a non-publishing server only moves heads that are already public
    extracond = '' if publishing else ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if outgoing.missing:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = [f.node() for f in unfi.set('roots(%ln + %ln::)',
                                              outgoing.missing, droots)]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    else:
        future = fallback
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
588
588
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set"""
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
599
599
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and record the updates to send"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only move bookmarks pointing into the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # a side may be missing the bookmark entirely (None)
        return x if x is None else hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        explicit.discard(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        explicit.discard(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        explicit.discard(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        explicit.discard(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        explicit.discard(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
664
664
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets; return True when there is
    something to push, False otherwise

    Aborts when an obsolete or troubled changeset would be pushed without
    --force."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    # if repo.obsstore == False --> no obsolete
    # then, save the iteration
    if not pushop.force and unfi.obsstore:
        # this message are here for 80 char limit reason
        mso = _("push includes obsolete changeset: %s!")
        mst = {"unstable": _("push includes unstable changeset: %s!"),
               "bumped": _("push includes bumped changeset: %s!"),
               "divergent": _("push includes divergent changeset: %s!")}
        # If we are to push if there is at least one
        # obsolete or unstable changeset in missing, at
        # least one of the missinghead will be obsolete or
        # unstable. So checking heads only is ok
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(mso % ctx)
            elif ctx.troubled():
                raise error.Abort(mst[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
695
695
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# step name -> function generating the corresponding bundle2 part
#
# Kept separate from the order list so extensions can wrap individual
# steps if necessary.
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part

    Registers the decorated function under ``stepname`` and inserts the
    name into the ordered step list: appended by default, or at position
    ``idx`` when given. Decoration order is registration order (this may
    matter).

    Only use this decorator for brand new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # a step name must be registered at most once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
722
722
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if pushop.force or not pushop.outgoing.missingheads:
        return
    if 'related' not in bundler.capabilities.get('checkheads', ()):
        # old-style check: the remote heads must match exactly
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        return
    # newer servers accept a finer-grained check on only the heads the
    # push actually affects
    affected = set()
    for branch, heads in pushop.pushbranchmap.iteritems():
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        if remoteheads is not None:
            remote = set(remoteheads)
            affected |= set(discardedheads) & remote
            affected |= remote - set(newheads)
    if affected:
        bundler.newpart('check:updated-heads', data=iter(sorted(affected)))
745
745
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        supported = changegroup.supportedoutgoingversions(pushop.repo)
        cgversions = [v for v in cgversions if v in supported]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
786
786
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        # map the failed part back to the head it was publishing
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            else:
                msg = None
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
827
827
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle when supported"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format shared with the remote
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
839
839
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # map the failed part back to the bookmark it was updating
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            elif int(results[0]['return']):
                ui.status(bookmsgmap[action][0] % book)
            else:
                ui.warn(bookmsgmap[action][1] % book)
                if pushop.bkresult is not None:
                    pushop.bkresult = 1
    return handlereply
891
891
892
892
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for name in b2partsgenorder:
        # each generator may return a callable to process the server reply
        handler = b2partsgenmapping[name](pushop, bundler)
        if callable(handler):
            replyhandlers.append(handler)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    payload = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                payload, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = pushop.trmanager.transaction if pushback else None
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # registered callbacks are expected to raise a more precise error
        pushop.pkfailcb[partid](pushop, exc)
    for replyhandler in replyhandlers:
        replyhandler(op)
941
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) changegroup push path.  Builds a changegroup
    covering ``pushop.outgoing`` and ships it with the remote's ``unbundle``
    wire command when available, falling back to the older
    ``addchangegroup`` command otherwise.  The remote's result is recorded
    in ``pushop.cgresult``.
    """
    # run at most once per push operation
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
                                        bundlecaps=bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
988
988
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Pulls the remote's phase map (via the ``phases`` pushkey namespace),
    advances local phase boundaries to match what the remote publishes,
    and — when changesets were actually pushed — pushes outdated draft
    heads to public on the remote through individual pushkey calls
    (fallback path when bundle2 did not already handle phases).
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common is public locally too
            _localphasemove(pushop, cheads)
        else: # publish = False
            # non-publishing: only remote-public heads become public,
            # the rest of the common set stays (at most) draft
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1044
1044
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # a transaction is at hand: actually advance the phase boundary
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    skipped = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasename = phases.phasenames[phase]
    if skipped:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasename)
1061
1061
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [pushop.remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        # at least one pushkey call was refused by the remote
        pushop.repo.ui.warn(_('failed to push some obsolete markers!\n'))
1080
1080
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for name, oldnode, newnode in pushop.outbookmarks:
        # classify the pushkey call so we can pick the right message pair
        if not oldnode:
            action = 'export'
        elif not newnode:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', name, oldnode, newnode):
            ui.status(bookmsgmap[action][0] % name)
        else:
            ui.warn(bookmsgmap[action][1] % name)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1102
1102
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set by pull() once locks are held)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless bundle1 is forced by config or server limitations
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1173
1173
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr:
            return self._tr
        # transaction name embeds the (password-stripped) remote url
        desc = '%s\n%s' % (self.source, util.hidepassword(self.url))
        tr = self.repo.transaction(desc)
        tr.hookargs['source'] = self.source
        tr.hookargs['url'] = self.url
        self._tr = tr
        return tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1203
1203
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # local-to-local pull: refuse early if the destination lacks
        # a repository requirement the source needs
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # lock ordering: wlock before lock, released in reverse below
    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each _pull* step is a no-op if bundle2 already handled it
        # (tracked via pullop.stepsdone)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # releases the transaction manager first, then the locks
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1259
1259
# list of steps to perform discovery before pull; populated, in registration
# order, by the pulldiscovery() decorator below
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1267
1267
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1283
1283
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # iterate in registration order; extensions may have wrapped entries
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1289
1289
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # caller already supplied the remote bookmark data
        return
    usebundle2 = pullop.canusebundle2
    if usebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1303
1303
1304
1304
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point.

    Fills in ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    # unfiltered nodemap so locally-hidden changesets are still found below
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # remote head is known locally (possibly hidden): treat as
                # common rather than something to fetch
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head turned out to be known locally
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1342
1342
1343 def _pullbundle2(pullop):
1343 def _pullbundle2(pullop):
1344 """pull data using bundle2
1344 """pull data using bundle2
1345
1345
1346 For now, the only supported data are changegroup."""
1346 For now, the only supported data are changegroup."""
1347 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1347 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1348
1348
1349 # At the moment we don't do stream clones over bundle2. If that is
1349 # At the moment we don't do stream clones over bundle2. If that is
1350 # implemented then here's where the check for that will go.
1350 # implemented then here's where the check for that will go.
1351 streaming = False
1351 streaming = False
1352
1352
1353 # pulling changegroup
1353 # pulling changegroup
1354 pullop.stepsdone.add('changegroup')
1354 pullop.stepsdone.add('changegroup')
1355
1355
1356 kwargs['common'] = pullop.common
1356 kwargs['common'] = pullop.common
1357 kwargs['heads'] = pullop.heads or pullop.rheads
1357 kwargs['heads'] = pullop.heads or pullop.rheads
1358 kwargs['cg'] = pullop.fetch
1358 kwargs['cg'] = pullop.fetch
1359 if 'listkeys' in pullop.remotebundle2caps:
1359 if 'listkeys' in pullop.remotebundle2caps:
1360 kwargs['listkeys'] = ['phases']
1360 kwargs['listkeys'] = ['phases']
1361 if pullop.remotebookmarks is None:
1361 if pullop.remotebookmarks is None:
1362 # make sure to always includes bookmark data when migrating
1362 # make sure to always includes bookmark data when migrating
1363 # `hg incoming --bundle` to using this function.
1363 # `hg incoming --bundle` to using this function.
1364 kwargs['listkeys'].append('bookmarks')
1364 kwargs['listkeys'].append('bookmarks')
1365
1365
1366 # If this is a full pull / clone and the server supports the clone bundles
1366 # If this is a full pull / clone and the server supports the clone bundles
1367 # feature, tell the server whether we attempted a clone bundle. The
1367 # feature, tell the server whether we attempted a clone bundle. The
1368 # presence of this flag indicates the client supports clone bundles. This
1368 # presence of this flag indicates the client supports clone bundles. This
1369 # will enable the server to treat clients that support clone bundles
1369 # will enable the server to treat clients that support clone bundles
1370 # differently from those that don't.
1370 # differently from those that don't.
1371 if (pullop.remote.capable('clonebundles')
1371 if (pullop.remote.capable('clonebundles')
1372 and pullop.heads is None and list(pullop.common) == [nullid]):
1372 and pullop.heads is None and list(pullop.common) == [nullid]):
1373 kwargs['cbattempted'] = pullop.clonebundleattempted
1373 kwargs['cbattempted'] = pullop.clonebundleattempted
1374
1374
1375 if streaming:
1375 if streaming:
1376 pullop.repo.ui.status(_('streaming all changes\n'))
1376 pullop.repo.ui.status(_('streaming all changes\n'))
1377 elif not pullop.fetch:
1377 elif not pullop.fetch:
1378 pullop.repo.ui.status(_("no changes found\n"))
1378 pullop.repo.ui.status(_("no changes found\n"))
1379 pullop.cgresult = 0
1379 pullop.cgresult = 0
1380 else:
1380 else:
1381 if pullop.heads is None and list(pullop.common) == [nullid]:
1381 if pullop.heads is None and list(pullop.common) == [nullid]:
1382 pullop.repo.ui.status(_("requesting all changes\n"))
1382 pullop.repo.ui.status(_("requesting all changes\n"))
1383 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1383 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1384 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1384 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1385 if obsolete.commonversion(remoteversions) is not None:
1385 if obsolete.commonversion(remoteversions) is not None:
1386 kwargs['obsmarkers'] = True
1386 kwargs['obsmarkers'] = True
1387 pullop.stepsdone.add('obsmarkers')
1387 pullop.stepsdone.add('obsmarkers')
1388 _pullbundle2extraprepare(pullop, kwargs)
1388 _pullbundle2extraprepare(pullop, kwargs)
1389 bundle = pullop.remote.getbundle('pull', **kwargs)
1389 bundle = pullop.remote.getbundle('pull', **kwargs)
1390 try:
1390 try:
1391 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1391 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1392 except bundle2.AbortFromPart as exc:
1392 except bundle2.AbortFromPart as exc:
1393 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1393 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1394 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1394 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1395 except error.BundleValueError as exc:
1395 except error.BundleValueError as exc:
1396 raise error.Abort(_('missing support for %s') % exc)
1396 raise error.Abort(_('missing support for %s') % exc)
1397
1397
1398 if pullop.fetch:
1398 if pullop.fetch:
1399 results = [cg['return'] for cg in op.records['changegroup']]
1399 results = [cg['return'] for cg in op.records['changegroup']]
1400 pullop.cgresult = changegroup.combineresults(results)
1400 pullop.cgresult = changegroup.combineresults(results)
1401
1401
1402 # processing phases change
1402 # processing phases change
1403 for namespace, value in op.records['listkeys']:
1403 for namespace, value in op.records['listkeys']:
1404 if namespace == 'phases':
1404 if namespace == 'phases':
1405 _pullapplyphases(pullop, value)
1405 _pullapplyphases(pullop, value)
1406
1406
1407 # processing bookmark update
1407 # processing bookmark update
1408 for namespace, value in op.records['listkeys']:
1408 for namespace, value in op.records['listkeys']:
1409 if namespace == 'bookmarks':
1409 if namespace == 'bookmarks':
1410 pullop.remotebookmarks = value
1410 pullop.remotebookmarks = value
1411
1411
1412 # bookmark data were either already there or pulled in the bundle
1412 # bookmark data were either already there or pulled in the bundle
1413 if pullop.remotebookmarks is not None:
1413 if pullop.remotebookmarks is not None:
1414 _pullbookmarks(pullop)
1414 _pullbookmarks(pullop)
1415
1415
1416 def _pullbundle2extraprepare(pullop, kwargs):
1416 def _pullbundle2extraprepare(pullop, kwargs):
1417 """hook function so that extensions can extend the getbundle call"""
1417 """hook function so that extensions can extend the getbundle call"""
1418 pass
1418 pass
1419
1419
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        # discovery found nothing to fetch; report and record success
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # Wire-protocol fallback chain, newest first: 'getbundle', then the
    # legacy 'changegroup' command (full pulls only), then
    # 'changegroupsubset' for partial pulls.
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1452
1452
def _pullphase(pullop):
    """Fetch the remote's phase data and apply it locally.

    Does nothing when the 'phases' step has already been handled
    (presumably by an earlier bundle2 exchange); otherwise queries the
    'phases' pushkey namespace and delegates to _pullapplyphases.
    """
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1459
1459
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    # local aliases hoisted out of the list comprehensions below
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        # only open a transaction if there is actually a boundary to move
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1494
1494
def _pullbookmarks(pullop):
    """Integrate the remote bookmark information into the local repository."""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    # the pushkey payload carries hex nodes; convert to binary before use
    binbookmarks = bookmod.unhexlifybookmarks(pullop.remotebookmarks)
    bookmod.updatefromremote(pullop.repo.ui, pullop.repo, binbookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1507
1507
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # markers travel base85-encoded in 'dumpN' pushkey entries; 'dump0'
        # acts as the sentinel that any marker data is present at all
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1535
1535
def caps20to10(repo):
    """Return a bundlecaps set advertising bundle2 support for getbundle.

    The set contains the 'HG20' magic plus a URL-quoted encoding of this
    repository's bundle2 capabilities.
    """
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
1542
1542
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated via the getbundle2partsgenerator decorator below.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1550
1550
def getbundle2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for getbundle.

    The decorated function is recorded under ``stepname`` in the
    step -> function mapping, and the step name is appended to the ordered
    step list (or inserted at position ``idx`` when given). Decoration
    order therefore matters.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, modify the getbundle2partsmapping dictionary directly.
    """
    def register(func):
        # a step name may only be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return func
    return register
1569
1569
def bundle2requested(bundlecaps):
    """Tell whether the given client capabilities request a bundle2 stream.

    A bundle2 request is signalled by any capability string starting with
    'HG2' (e.g. 'HG20'). ``None`` means a legacy (bundle10) client.
    """
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith('HG2'):
            return True
    return False
1574
1574
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 carries nothing but a changegroup; any extra argument is
        # a protocol error
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    # extract the client's bundle2 capabilities from the 'bundle2=' blob
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # forward heads/common to the individual part generators through kwargs
    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return bundler.getchunks()
1614
1614
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # negotiate the highest changegroup version both sides support
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        # advisory changeset count, used for progress reporting on the client
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1642
1642
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """emit one 'listkeys' part per requested pushkey namespace"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1653
1653
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # only ship markers relevant to ancestors of the requested heads
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)
1665
1665
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # bail out unless changesets are being exchanged...
    if not kwargs.get('cg', True):
        return
    # ...and the client declared support for this part
    if 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1685
1685
def _getbookmarks(repo, **kwargs):
    """Return the bookmark -> binary node mapping of *repo*.

    Primarily used to generate the `bookmarks` bundle2 part. Kept as a
    standalone function so extensions can easily wrap it; ``kwargs``
    exists so new parameters can be added without breaking wrappers.
    """
    return {book: node for book, node in bookmod.listbinbookmarks(repo)}
1696
1696
def check_heads(repo, their_heads, context):
    """Raise PushRaced if the repo heads no longer match *their_heads*.

    Used by peer for unbundling. *their_heads* may be the literal
    ['force'] (skip the check entirely), the actual list of head nodes,
    or ['hashed', <sha1 digest of the sorted heads>].
    """
    current = repo.heads()
    digest = hashlib.sha1(''.join(sorted(current))).digest()
    accepted = (their_heads == ['force']
                or their_heads == current
                or their_heads == ['hashed', digest])
    if not accepted:
        # someone else committed/pushed/unbundled while the data was
        # being transferred
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1710
1710
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                      False)
    # output capture is always forced for http(s) peers
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            with repo.lock():
                r = cg.apply(repo, source, url)
        else:
            r = None
            try:
                # lazily acquire wlock, lock and a transaction on first use;
                # stores them in lockandtr so the enclosing finally can
                # release whatever was actually taken
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # start buffering ui output so it can be attached to
                        # the reply as an 'output' part
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # tag the exception so callers know it happened during a
                # bundle2 exchange
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage reply parts generated so far and keep
                    # recording output onto them
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1782
1782
def _maybeapplyclonebundle(pullop):
    """Fetch and apply a clone bundle advertised by the remote, if possible.

    This is a no-op unless clone bundles are enabled, the local repo is
    empty (i.e. this pull is a clone), no explicit heads were requested,
    and the remote advertises the ``clonebundles`` capability.
    """
    repo = pullop.repo
    remote = pullop.remote

    # Guard clauses: all preconditions for attempting a clone bundle.
    # Evaluation order matches the configuration/emptiness/heads/capability
    # checks performed individually before.
    if (not repo.ui.configbool('ui', 'clonebundles', True)
        or len(repo)
        or pullop.heads
        or not remote.capable('clonebundles')):
        return

    manifest = remote._call('clonebundles')

    # Reaching the wire protocol command is good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, manifest)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
        return

    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    if repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
1846
1846
def parseclonebundlesmanifest(repo, s):
    """Parse the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding
    to the entry's URL; all other keys are the (percent-decoded)
    attributes for that entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            # Skip blank lines.
            continue

        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(
                        repo, value, externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # An unparseable spec is recorded verbatim but not
                    # expanded into COMPRESSION/VERSION.
                    pass

        entries.append(attrs)

    return entries
1882
1882
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def usable(entry):
        # Return True when this client should be able to apply ``entry``;
        # emit a debug message explaining any rejection.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                return False

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if usable(entry)]
1915
1915
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the preference list in order; the first attribute that can
        # distinguish the two entries decides the ordering.
        for key, preferred in self.prefers:
            mine = self.value.get(key)
            theirs = other.value.get(key)

            # An entry carrying exactly the preferred value beats an entry
            # that lacks the attribute entirely.
            if mine is not None and theirs is None and mine == preferred:
                return -1
            if theirs is not None and mine is None and theirs == preferred:
                return 1

            # If the attribute is missing on either side (and the special
            # cases above didn't apply), or both sides agree, this
            # preference cannot discriminate; move on to the next one.
            if mine is None or theirs is None or mine == theirs:
                continue

            # Both present and different: an exact match with the
            # preferred value sorts first.
            if mine == preferred:
                return -1
            if theirs == preferred:
                return 1

            # Neither side matched the preferred value; fall through to
            # the next preference.

        # No preference could tell the entries apart; preserve original
        # (manifest) order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
1979
1979
def sortclonebundleentries(ui, entries):
    """Order manifest ``entries`` by the user's clone bundle preferences.

    Preferences come from the ``ui.clonebundleprefers`` config list of
    ``KEY=VALUE`` strings; when none are set, the manifest order is kept.
    """
    raw = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not raw:
        # No preferences configured; keep the server-provided ordering.
        return list(entries)

    prefers = []
    for pref in raw:
        prefers.append(pref.split('=', 1))

    # Wrap each entry so the rich comparison logic in clonebundleentry
    # drives a stable sort, then unwrap.
    wrapped = [clonebundleentry(value, prefers) for value in entries]
    wrapped.sort()
    return [item.value for item in wrapped]
1989
1989
def trypullbundlefromurl(ui, repo, url):
    """Fetch the bundle at ``url`` and apply it to ``repo``.

    Runs under the repo lock inside a ``bundleurl`` transaction. Returns
    True when the bundle was applied; on an HTTP or URL fetch error a
    warning is emitted and False is returned so the caller can fall back.
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            bundlefile = urlmod.open(ui, url)
            bundle = readbundle(ui, bundlefile, 'stream')

            # Dispatch on the concrete object readbundle() handed back.
            if isinstance(bundle, bundle2.unbundle20):
                bundle2.processbundle(repo, bundle, lambda: tr)
            elif isinstance(bundle, streamclone.streamcloneapplier):
                bundle.apply(repo)
            else:
                bundle.apply(repo, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

    return False
General Comments 0
You need to be logged in to leave comments. Login now