##// END OF EJS Templates
bundle2: don't check for whether we can do stream clones...
Siddharth Agarwal -
r32257:205bd393 default
parent child Browse files
Show More
@@ -1,1998 +1,2000 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 bookmarks as bookmod,
19 bookmarks as bookmod,
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 lock as lockmod,
24 lock as lockmod,
25 obsolete,
25 obsolete,
26 phases,
26 phases,
27 pushkey,
27 pushkey,
28 scmutil,
28 scmutil,
29 sslutil,
29 sslutil,
30 streamclone,
30 streamclone,
31 url as urlmod,
31 url as urlmod,
32 util,
32 util,
33 )
33 )
34
34
35 urlerr = util.urlerr
35 urlerr = util.urlerr
36 urlreq = util.urlreq
36 urlreq = util.urlreq
37
37
38 # Maps bundle version human names to changegroup versions.
38 # Maps bundle version human names to changegroup versions.
39 _bundlespeccgversions = {'v1': '01',
39 _bundlespeccgversions = {'v1': '01',
40 'v2': '02',
40 'v2': '02',
41 'packed1': 's1',
41 'packed1': 's1',
42 'bundle2': '02', #legacy
42 'bundle2': '02', #legacy
43 }
43 }
44
44
45 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
45 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
46 _bundlespecv1compengines = set(['gzip', 'bzip2', 'none'])
46 _bundlespecv1compengines = set(['gzip', 'bzip2', 'none'])
47
47
48 def parsebundlespec(repo, spec, strict=True, externalnames=False):
48 def parsebundlespec(repo, spec, strict=True, externalnames=False):
49 """Parse a bundle string specification into parts.
49 """Parse a bundle string specification into parts.
50
50
51 Bundle specifications denote a well-defined bundle/exchange format.
51 Bundle specifications denote a well-defined bundle/exchange format.
52 The content of a given specification should not change over time in
52 The content of a given specification should not change over time in
53 order to ensure that bundles produced by a newer version of Mercurial are
53 order to ensure that bundles produced by a newer version of Mercurial are
54 readable from an older version.
54 readable from an older version.
55
55
56 The string currently has the form:
56 The string currently has the form:
57
57
58 <compression>-<type>[;<parameter0>[;<parameter1>]]
58 <compression>-<type>[;<parameter0>[;<parameter1>]]
59
59
60 Where <compression> is one of the supported compression formats
60 Where <compression> is one of the supported compression formats
61 and <type> is (currently) a version string. A ";" can follow the type and
61 and <type> is (currently) a version string. A ";" can follow the type and
62 all text afterwards is interpreted as URI encoded, ";" delimited key=value
62 all text afterwards is interpreted as URI encoded, ";" delimited key=value
63 pairs.
63 pairs.
64
64
65 If ``strict`` is True (the default) <compression> is required. Otherwise,
65 If ``strict`` is True (the default) <compression> is required. Otherwise,
66 it is optional.
66 it is optional.
67
67
68 If ``externalnames`` is False (the default), the human-centric names will
68 If ``externalnames`` is False (the default), the human-centric names will
69 be converted to their internal representation.
69 be converted to their internal representation.
70
70
71 Returns a 3-tuple of (compression, version, parameters). Compression will
71 Returns a 3-tuple of (compression, version, parameters). Compression will
72 be ``None`` if not in strict mode and a compression isn't defined.
72 be ``None`` if not in strict mode and a compression isn't defined.
73
73
74 An ``InvalidBundleSpecification`` is raised when the specification is
74 An ``InvalidBundleSpecification`` is raised when the specification is
75 not syntactically well formed.
75 not syntactically well formed.
76
76
77 An ``UnsupportedBundleSpecification`` is raised when the compression or
77 An ``UnsupportedBundleSpecification`` is raised when the compression or
78 bundle type/version is not recognized.
78 bundle type/version is not recognized.
79
79
80 Note: this function will likely eventually return a more complex data
80 Note: this function will likely eventually return a more complex data
81 structure, including bundle2 part information.
81 structure, including bundle2 part information.
82 """
82 """
83 def parseparams(s):
83 def parseparams(s):
84 if ';' not in s:
84 if ';' not in s:
85 return s, {}
85 return s, {}
86
86
87 params = {}
87 params = {}
88 version, paramstr = s.split(';', 1)
88 version, paramstr = s.split(';', 1)
89
89
90 for p in paramstr.split(';'):
90 for p in paramstr.split(';'):
91 if '=' not in p:
91 if '=' not in p:
92 raise error.InvalidBundleSpecification(
92 raise error.InvalidBundleSpecification(
93 _('invalid bundle specification: '
93 _('invalid bundle specification: '
94 'missing "=" in parameter: %s') % p)
94 'missing "=" in parameter: %s') % p)
95
95
96 key, value = p.split('=', 1)
96 key, value = p.split('=', 1)
97 key = urlreq.unquote(key)
97 key = urlreq.unquote(key)
98 value = urlreq.unquote(value)
98 value = urlreq.unquote(value)
99 params[key] = value
99 params[key] = value
100
100
101 return version, params
101 return version, params
102
102
103
103
104 if strict and '-' not in spec:
104 if strict and '-' not in spec:
105 raise error.InvalidBundleSpecification(
105 raise error.InvalidBundleSpecification(
106 _('invalid bundle specification; '
106 _('invalid bundle specification; '
107 'must be prefixed with compression: %s') % spec)
107 'must be prefixed with compression: %s') % spec)
108
108
109 if '-' in spec:
109 if '-' in spec:
110 compression, version = spec.split('-', 1)
110 compression, version = spec.split('-', 1)
111
111
112 if compression not in util.compengines.supportedbundlenames:
112 if compression not in util.compengines.supportedbundlenames:
113 raise error.UnsupportedBundleSpecification(
113 raise error.UnsupportedBundleSpecification(
114 _('%s compression is not supported') % compression)
114 _('%s compression is not supported') % compression)
115
115
116 version, params = parseparams(version)
116 version, params = parseparams(version)
117
117
118 if version not in _bundlespeccgversions:
118 if version not in _bundlespeccgversions:
119 raise error.UnsupportedBundleSpecification(
119 raise error.UnsupportedBundleSpecification(
120 _('%s is not a recognized bundle version') % version)
120 _('%s is not a recognized bundle version') % version)
121 else:
121 else:
122 # Value could be just the compression or just the version, in which
122 # Value could be just the compression or just the version, in which
123 # case some defaults are assumed (but only when not in strict mode).
123 # case some defaults are assumed (but only when not in strict mode).
124 assert not strict
124 assert not strict
125
125
126 spec, params = parseparams(spec)
126 spec, params = parseparams(spec)
127
127
128 if spec in util.compengines.supportedbundlenames:
128 if spec in util.compengines.supportedbundlenames:
129 compression = spec
129 compression = spec
130 version = 'v1'
130 version = 'v1'
131 # Generaldelta repos require v2.
131 # Generaldelta repos require v2.
132 if 'generaldelta' in repo.requirements:
132 if 'generaldelta' in repo.requirements:
133 version = 'v2'
133 version = 'v2'
134 # Modern compression engines require v2.
134 # Modern compression engines require v2.
135 if compression not in _bundlespecv1compengines:
135 if compression not in _bundlespecv1compengines:
136 version = 'v2'
136 version = 'v2'
137 elif spec in _bundlespeccgversions:
137 elif spec in _bundlespeccgversions:
138 if spec == 'packed1':
138 if spec == 'packed1':
139 compression = 'none'
139 compression = 'none'
140 else:
140 else:
141 compression = 'bzip2'
141 compression = 'bzip2'
142 version = spec
142 version = spec
143 else:
143 else:
144 raise error.UnsupportedBundleSpecification(
144 raise error.UnsupportedBundleSpecification(
145 _('%s is not a recognized bundle specification') % spec)
145 _('%s is not a recognized bundle specification') % spec)
146
146
147 # Bundle version 1 only supports a known set of compression engines.
147 # Bundle version 1 only supports a known set of compression engines.
148 if version == 'v1' and compression not in _bundlespecv1compengines:
148 if version == 'v1' and compression not in _bundlespecv1compengines:
149 raise error.UnsupportedBundleSpecification(
149 raise error.UnsupportedBundleSpecification(
150 _('compression engine %s is not supported on v1 bundles') %
150 _('compression engine %s is not supported on v1 bundles') %
151 compression)
151 compression)
152
152
153 # The specification for packed1 can optionally declare the data formats
153 # The specification for packed1 can optionally declare the data formats
154 # required to apply it. If we see this metadata, compare against what the
154 # required to apply it. If we see this metadata, compare against what the
155 # repo supports and error if the bundle isn't compatible.
155 # repo supports and error if the bundle isn't compatible.
156 if version == 'packed1' and 'requirements' in params:
156 if version == 'packed1' and 'requirements' in params:
157 requirements = set(params['requirements'].split(','))
157 requirements = set(params['requirements'].split(','))
158 missingreqs = requirements - repo.supportedformats
158 missingreqs = requirements - repo.supportedformats
159 if missingreqs:
159 if missingreqs:
160 raise error.UnsupportedBundleSpecification(
160 raise error.UnsupportedBundleSpecification(
161 _('missing support for repository features: %s') %
161 _('missing support for repository features: %s') %
162 ', '.join(sorted(missingreqs)))
162 ', '.join(sorted(missingreqs)))
163
163
164 if not externalnames:
164 if not externalnames:
165 engine = util.compengines.forbundlename(compression)
165 engine = util.compengines.forbundlename(compression)
166 compression = engine.bundletype()[1]
166 compression = engine.bundletype()[1]
167 version = _bundlespeccgversions[version]
167 version = _bundlespeccgversions[version]
168 return compression, version, params
168 return compression, version, params
169
169
170 def readbundle(ui, fh, fname, vfs=None):
170 def readbundle(ui, fh, fname, vfs=None):
171 header = changegroup.readexactly(fh, 4)
171 header = changegroup.readexactly(fh, 4)
172
172
173 alg = None
173 alg = None
174 if not fname:
174 if not fname:
175 fname = "stream"
175 fname = "stream"
176 if not header.startswith('HG') and header.startswith('\0'):
176 if not header.startswith('HG') and header.startswith('\0'):
177 fh = changegroup.headerlessfixup(fh, header)
177 fh = changegroup.headerlessfixup(fh, header)
178 header = "HG10"
178 header = "HG10"
179 alg = 'UN'
179 alg = 'UN'
180 elif vfs:
180 elif vfs:
181 fname = vfs.join(fname)
181 fname = vfs.join(fname)
182
182
183 magic, version = header[0:2], header[2:4]
183 magic, version = header[0:2], header[2:4]
184
184
185 if magic != 'HG':
185 if magic != 'HG':
186 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
186 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
187 if version == '10':
187 if version == '10':
188 if alg is None:
188 if alg is None:
189 alg = changegroup.readexactly(fh, 2)
189 alg = changegroup.readexactly(fh, 2)
190 return changegroup.cg1unpacker(fh, alg)
190 return changegroup.cg1unpacker(fh, alg)
191 elif version.startswith('2'):
191 elif version.startswith('2'):
192 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
192 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
193 elif version == 'S1':
193 elif version == 'S1':
194 return streamclone.streamcloneapplier(fh)
194 return streamclone.streamcloneapplier(fh)
195 else:
195 else:
196 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
196 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
197
197
198 def getbundlespec(ui, fh):
198 def getbundlespec(ui, fh):
199 """Infer the bundlespec from a bundle file handle.
199 """Infer the bundlespec from a bundle file handle.
200
200
201 The input file handle is seeked and the original seek position is not
201 The input file handle is seeked and the original seek position is not
202 restored.
202 restored.
203 """
203 """
204 def speccompression(alg):
204 def speccompression(alg):
205 try:
205 try:
206 return util.compengines.forbundletype(alg).bundletype()[0]
206 return util.compengines.forbundletype(alg).bundletype()[0]
207 except KeyError:
207 except KeyError:
208 return None
208 return None
209
209
210 b = readbundle(ui, fh, None)
210 b = readbundle(ui, fh, None)
211 if isinstance(b, changegroup.cg1unpacker):
211 if isinstance(b, changegroup.cg1unpacker):
212 alg = b._type
212 alg = b._type
213 if alg == '_truncatedBZ':
213 if alg == '_truncatedBZ':
214 alg = 'BZ'
214 alg = 'BZ'
215 comp = speccompression(alg)
215 comp = speccompression(alg)
216 if not comp:
216 if not comp:
217 raise error.Abort(_('unknown compression algorithm: %s') % alg)
217 raise error.Abort(_('unknown compression algorithm: %s') % alg)
218 return '%s-v1' % comp
218 return '%s-v1' % comp
219 elif isinstance(b, bundle2.unbundle20):
219 elif isinstance(b, bundle2.unbundle20):
220 if 'Compression' in b.params:
220 if 'Compression' in b.params:
221 comp = speccompression(b.params['Compression'])
221 comp = speccompression(b.params['Compression'])
222 if not comp:
222 if not comp:
223 raise error.Abort(_('unknown compression algorithm: %s') % comp)
223 raise error.Abort(_('unknown compression algorithm: %s') % comp)
224 else:
224 else:
225 comp = 'none'
225 comp = 'none'
226
226
227 version = None
227 version = None
228 for part in b.iterparts():
228 for part in b.iterparts():
229 if part.type == 'changegroup':
229 if part.type == 'changegroup':
230 version = part.params['version']
230 version = part.params['version']
231 if version in ('01', '02'):
231 if version in ('01', '02'):
232 version = 'v2'
232 version = 'v2'
233 else:
233 else:
234 raise error.Abort(_('changegroup version %s does not have '
234 raise error.Abort(_('changegroup version %s does not have '
235 'a known bundlespec') % version,
235 'a known bundlespec') % version,
236 hint=_('try upgrading your Mercurial '
236 hint=_('try upgrading your Mercurial '
237 'client'))
237 'client'))
238
238
239 if not version:
239 if not version:
240 raise error.Abort(_('could not identify changegroup version in '
240 raise error.Abort(_('could not identify changegroup version in '
241 'bundle'))
241 'bundle'))
242
242
243 return '%s-%s' % (comp, version)
243 return '%s-%s' % (comp, version)
244 elif isinstance(b, streamclone.streamcloneapplier):
244 elif isinstance(b, streamclone.streamcloneapplier):
245 requirements = streamclone.readbundle1header(fh)[2]
245 requirements = streamclone.readbundle1header(fh)[2]
246 params = 'requirements=%s' % ','.join(sorted(requirements))
246 params = 'requirements=%s' % ','.join(sorted(requirements))
247 return 'none-packed1;%s' % urlreq.quote(params)
247 return 'none-packed1;%s' % urlreq.quote(params)
248 else:
248 else:
249 raise error.Abort(_('unknown bundle type: %s') % b)
249 raise error.Abort(_('unknown bundle type: %s') % b)
250
250
251 def buildobsmarkerspart(bundler, markers):
251 def buildobsmarkerspart(bundler, markers):
252 """add an obsmarker part to the bundler with <markers>
252 """add an obsmarker part to the bundler with <markers>
253
253
254 No part is created if markers is empty.
254 No part is created if markers is empty.
255 Raises ValueError if the bundler doesn't support any known obsmarker format.
255 Raises ValueError if the bundler doesn't support any known obsmarker format.
256 """
256 """
257 if markers:
257 if markers:
258 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
258 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
259 version = obsolete.commonversion(remoteversions)
259 version = obsolete.commonversion(remoteversions)
260 if version is None:
260 if version is None:
261 raise ValueError('bundler does not support common obsmarker format')
261 raise ValueError('bundler does not support common obsmarker format')
262 stream = obsolete.encodemarkers(markers, True, version=version)
262 stream = obsolete.encodemarkers(markers, True, version=version)
263 return bundler.newpart('obsmarkers', data=stream)
263 return bundler.newpart('obsmarkers', data=stream)
264 return None
264 return None
265
265
266 def _computeoutgoing(repo, heads, common):
266 def _computeoutgoing(repo, heads, common):
267 """Computes which revs are outgoing given a set of common
267 """Computes which revs are outgoing given a set of common
268 and a set of heads.
268 and a set of heads.
269
269
270 This is a separate function so extensions can have access to
270 This is a separate function so extensions can have access to
271 the logic.
271 the logic.
272
272
273 Returns a discovery.outgoing object.
273 Returns a discovery.outgoing object.
274 """
274 """
275 cl = repo.changelog
275 cl = repo.changelog
276 if common:
276 if common:
277 hasnode = cl.hasnode
277 hasnode = cl.hasnode
278 common = [n for n in common if hasnode(n)]
278 common = [n for n in common if hasnode(n)]
279 else:
279 else:
280 common = [nullid]
280 common = [nullid]
281 if not heads:
281 if not heads:
282 heads = cl.heads()
282 heads = cl.heads()
283 return discovery.outgoing(repo, common, heads)
283 return discovery.outgoing(repo, common, heads)
284
284
285 def _forcebundle1(op):
285 def _forcebundle1(op):
286 """return true if a pull/push must use bundle1
286 """return true if a pull/push must use bundle1
287
287
288 This function is used to allow testing of the older bundle version"""
288 This function is used to allow testing of the older bundle version"""
289 ui = op.repo.ui
289 ui = op.repo.ui
290 forcebundle1 = False
290 forcebundle1 = False
291 # The goal is this config is to allow developer to choose the bundle
291 # The goal is this config is to allow developer to choose the bundle
292 # version used during exchanged. This is especially handy during test.
292 # version used during exchanged. This is especially handy during test.
293 # Value is a list of bundle version to be picked from, highest version
293 # Value is a list of bundle version to be picked from, highest version
294 # should be used.
294 # should be used.
295 #
295 #
296 # developer config: devel.legacy.exchange
296 # developer config: devel.legacy.exchange
297 exchange = ui.configlist('devel', 'legacy.exchange')
297 exchange = ui.configlist('devel', 'legacy.exchange')
298 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
298 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
299 return forcebundle1 or not op.remote.capable('bundle2')
299 return forcebundle1 or not op.remote.capable('bundle2')
300
300
301 class pushoperation(object):
301 class pushoperation(object):
302 """A object that represent a single push operation
302 """A object that represent a single push operation
303
303
304 Its purpose is to carry push related state and very common operations.
304 Its purpose is to carry push related state and very common operations.
305
305
306 A new pushoperation should be created at the beginning of each push and
306 A new pushoperation should be created at the beginning of each push and
307 discarded afterward.
307 discarded afterward.
308 """
308 """
309
309
310 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
310 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
311 bookmarks=()):
311 bookmarks=()):
312 # repo we push from
312 # repo we push from
313 self.repo = repo
313 self.repo = repo
314 self.ui = repo.ui
314 self.ui = repo.ui
315 # repo we push to
315 # repo we push to
316 self.remote = remote
316 self.remote = remote
317 # force option provided
317 # force option provided
318 self.force = force
318 self.force = force
319 # revs to be pushed (None is "all")
319 # revs to be pushed (None is "all")
320 self.revs = revs
320 self.revs = revs
321 # bookmark explicitly pushed
321 # bookmark explicitly pushed
322 self.bookmarks = bookmarks
322 self.bookmarks = bookmarks
323 # allow push of new branch
323 # allow push of new branch
324 self.newbranch = newbranch
324 self.newbranch = newbranch
325 # did a local lock get acquired?
325 # did a local lock get acquired?
326 self.locallocked = None
326 self.locallocked = None
327 # step already performed
327 # step already performed
328 # (used to check what steps have been already performed through bundle2)
328 # (used to check what steps have been already performed through bundle2)
329 self.stepsdone = set()
329 self.stepsdone = set()
330 # Integer version of the changegroup push result
330 # Integer version of the changegroup push result
331 # - None means nothing to push
331 # - None means nothing to push
332 # - 0 means HTTP error
332 # - 0 means HTTP error
333 # - 1 means we pushed and remote head count is unchanged *or*
333 # - 1 means we pushed and remote head count is unchanged *or*
334 # we have outgoing changesets but refused to push
334 # we have outgoing changesets but refused to push
335 # - other values as described by addchangegroup()
335 # - other values as described by addchangegroup()
336 self.cgresult = None
336 self.cgresult = None
337 # Boolean value for the bookmark push
337 # Boolean value for the bookmark push
338 self.bkresult = None
338 self.bkresult = None
339 # discover.outgoing object (contains common and outgoing data)
339 # discover.outgoing object (contains common and outgoing data)
340 self.outgoing = None
340 self.outgoing = None
341 # all remote heads before the push
341 # all remote heads before the push
342 self.remoteheads = None
342 self.remoteheads = None
343 # testable as a boolean indicating if any nodes are missing locally.
343 # testable as a boolean indicating if any nodes are missing locally.
344 self.incoming = None
344 self.incoming = None
345 # phases changes that must be pushed along side the changesets
345 # phases changes that must be pushed along side the changesets
346 self.outdatedphases = None
346 self.outdatedphases = None
347 # phases changes that must be pushed if changeset push fails
347 # phases changes that must be pushed if changeset push fails
348 self.fallbackoutdatedphases = None
348 self.fallbackoutdatedphases = None
349 # outgoing obsmarkers
349 # outgoing obsmarkers
350 self.outobsmarkers = set()
350 self.outobsmarkers = set()
351 # outgoing bookmarks
351 # outgoing bookmarks
352 self.outbookmarks = []
352 self.outbookmarks = []
353 # transaction manager
353 # transaction manager
354 self.trmanager = None
354 self.trmanager = None
355 # map { pushkey partid -> callback handling failure}
355 # map { pushkey partid -> callback handling failure}
356 # used to handle exception from mandatory pushkey part failure
356 # used to handle exception from mandatory pushkey part failure
357 self.pkfailcb = {}
357 self.pkfailcb = {}
358
358
359 @util.propertycache
359 @util.propertycache
360 def futureheads(self):
360 def futureheads(self):
361 """future remote heads if the changeset push succeeds"""
361 """future remote heads if the changeset push succeeds"""
362 return self.outgoing.missingheads
362 return self.outgoing.missingheads
363
363
364 @util.propertycache
364 @util.propertycache
365 def fallbackheads(self):
365 def fallbackheads(self):
366 """future remote heads if the changeset push fails"""
366 """future remote heads if the changeset push fails"""
367 if self.revs is None:
367 if self.revs is None:
368 # not target to push, all common are relevant
368 # not target to push, all common are relevant
369 return self.outgoing.commonheads
369 return self.outgoing.commonheads
370 unfi = self.repo.unfiltered()
370 unfi = self.repo.unfiltered()
371 # I want cheads = heads(::missingheads and ::commonheads)
371 # I want cheads = heads(::missingheads and ::commonheads)
372 # (missingheads is revs with secret changeset filtered out)
372 # (missingheads is revs with secret changeset filtered out)
373 #
373 #
374 # This can be expressed as:
374 # This can be expressed as:
375 # cheads = ( (missingheads and ::commonheads)
375 # cheads = ( (missingheads and ::commonheads)
376 # + (commonheads and ::missingheads))"
376 # + (commonheads and ::missingheads))"
377 # )
377 # )
378 #
378 #
379 # while trying to push we already computed the following:
379 # while trying to push we already computed the following:
380 # common = (::commonheads)
380 # common = (::commonheads)
381 # missing = ((commonheads::missingheads) - commonheads)
381 # missing = ((commonheads::missingheads) - commonheads)
382 #
382 #
383 # We can pick:
383 # We can pick:
384 # * missingheads part of common (::commonheads)
384 # * missingheads part of common (::commonheads)
385 common = self.outgoing.common
385 common = self.outgoing.common
386 nm = self.repo.changelog.nodemap
386 nm = self.repo.changelog.nodemap
387 cheads = [node for node in self.revs if nm[node] in common]
387 cheads = [node for node in self.revs if nm[node] in common]
388 # and
388 # and
389 # * commonheads parents on missing
389 # * commonheads parents on missing
390 revset = unfi.set('%ln and parents(roots(%ln))',
390 revset = unfi.set('%ln and parents(roots(%ln))',
391 self.outgoing.commonheads,
391 self.outgoing.commonheads,
392 self.outgoing.missing)
392 self.outgoing.missing)
393 cheads.extend(c.node() for c in revset)
393 cheads.extend(c.node() for c in revset)
394 return cheads
394 return cheads
395
395
396 @property
396 @property
397 def commonheads(self):
397 def commonheads(self):
398 """set of all common heads after changeset bundle push"""
398 """set of all common heads after changeset bundle push"""
399 if self.cgresult:
399 if self.cgresult:
400 return self.futureheads
400 return self.futureheads
401 else:
401 else:
402 return self.fallbackheads
402 return self.fallbackheads
403
403
404 # mapping of message used when pushing bookmark
404 # mapping of message used when pushing bookmark
405 bookmsgmap = {'update': (_("updating bookmark %s\n"),
405 bookmsgmap = {'update': (_("updating bookmark %s\n"),
406 _('updating bookmark %s failed!\n')),
406 _('updating bookmark %s failed!\n')),
407 'export': (_("exporting bookmark %s\n"),
407 'export': (_("exporting bookmark %s\n"),
408 _('exporting bookmark %s failed!\n')),
408 _('exporting bookmark %s failed!\n')),
409 'delete': (_("deleting remote bookmark %s\n"),
409 'delete': (_("deleting remote bookmark %s\n"),
410 _('deleting remote bookmark %s failed!\n')),
410 _('deleting remote bookmark %s failed!\n')),
411 }
411 }
412
412
413
413
414 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
414 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
415 opargs=None):
415 opargs=None):
416 '''Push outgoing changesets (limited by revs) from a local
416 '''Push outgoing changesets (limited by revs) from a local
417 repository to remote. Return an integer:
417 repository to remote. Return an integer:
418 - None means nothing to push
418 - None means nothing to push
419 - 0 means HTTP error
419 - 0 means HTTP error
420 - 1 means we pushed and remote head count is unchanged *or*
420 - 1 means we pushed and remote head count is unchanged *or*
421 we have outgoing changesets but refused to push
421 we have outgoing changesets but refused to push
422 - other values as described by addchangegroup()
422 - other values as described by addchangegroup()
423 '''
423 '''
424 if opargs is None:
424 if opargs is None:
425 opargs = {}
425 opargs = {}
426 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
426 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
427 **opargs)
427 **opargs)
428 if pushop.remote.local():
428 if pushop.remote.local():
429 missing = (set(pushop.repo.requirements)
429 missing = (set(pushop.repo.requirements)
430 - pushop.remote.local().supported)
430 - pushop.remote.local().supported)
431 if missing:
431 if missing:
432 msg = _("required features are not"
432 msg = _("required features are not"
433 " supported in the destination:"
433 " supported in the destination:"
434 " %s") % (', '.join(sorted(missing)))
434 " %s") % (', '.join(sorted(missing)))
435 raise error.Abort(msg)
435 raise error.Abort(msg)
436
436
437 # there are two ways to push to remote repo:
437 # there are two ways to push to remote repo:
438 #
438 #
439 # addchangegroup assumes local user can lock remote
439 # addchangegroup assumes local user can lock remote
440 # repo (local filesystem, old ssh servers).
440 # repo (local filesystem, old ssh servers).
441 #
441 #
442 # unbundle assumes local user cannot lock remote repo (new ssh
442 # unbundle assumes local user cannot lock remote repo (new ssh
443 # servers, http servers).
443 # servers, http servers).
444
444
445 if not pushop.remote.canpush():
445 if not pushop.remote.canpush():
446 raise error.Abort(_("destination does not support push"))
446 raise error.Abort(_("destination does not support push"))
447 # get local lock as we might write phase data
447 # get local lock as we might write phase data
448 localwlock = locallock = None
448 localwlock = locallock = None
449 try:
449 try:
450 # bundle2 push may receive a reply bundle touching bookmarks or other
450 # bundle2 push may receive a reply bundle touching bookmarks or other
451 # things requiring the wlock. Take it now to ensure proper ordering.
451 # things requiring the wlock. Take it now to ensure proper ordering.
452 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
452 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
453 if (not _forcebundle1(pushop)) and maypushback:
453 if (not _forcebundle1(pushop)) and maypushback:
454 localwlock = pushop.repo.wlock()
454 localwlock = pushop.repo.wlock()
455 locallock = pushop.repo.lock()
455 locallock = pushop.repo.lock()
456 pushop.locallocked = True
456 pushop.locallocked = True
457 except IOError as err:
457 except IOError as err:
458 pushop.locallocked = False
458 pushop.locallocked = False
459 if err.errno != errno.EACCES:
459 if err.errno != errno.EACCES:
460 raise
460 raise
461 # source repo cannot be locked.
461 # source repo cannot be locked.
462 # We do not abort the push, but just disable the local phase
462 # We do not abort the push, but just disable the local phase
463 # synchronisation.
463 # synchronisation.
464 msg = 'cannot lock source repository: %s\n' % err
464 msg = 'cannot lock source repository: %s\n' % err
465 pushop.ui.debug(msg)
465 pushop.ui.debug(msg)
466 try:
466 try:
467 if pushop.locallocked:
467 if pushop.locallocked:
468 pushop.trmanager = transactionmanager(pushop.repo,
468 pushop.trmanager = transactionmanager(pushop.repo,
469 'push-response',
469 'push-response',
470 pushop.remote.url())
470 pushop.remote.url())
471 pushop.repo.checkpush(pushop)
471 pushop.repo.checkpush(pushop)
472 lock = None
472 lock = None
473 unbundle = pushop.remote.capable('unbundle')
473 unbundle = pushop.remote.capable('unbundle')
474 if not unbundle:
474 if not unbundle:
475 lock = pushop.remote.lock()
475 lock = pushop.remote.lock()
476 try:
476 try:
477 _pushdiscovery(pushop)
477 _pushdiscovery(pushop)
478 if not _forcebundle1(pushop):
478 if not _forcebundle1(pushop):
479 _pushbundle2(pushop)
479 _pushbundle2(pushop)
480 _pushchangeset(pushop)
480 _pushchangeset(pushop)
481 _pushsyncphase(pushop)
481 _pushsyncphase(pushop)
482 _pushobsolete(pushop)
482 _pushobsolete(pushop)
483 _pushbookmark(pushop)
483 _pushbookmark(pushop)
484 finally:
484 finally:
485 if lock is not None:
485 if lock is not None:
486 lock.release()
486 lock.release()
487 if pushop.trmanager:
487 if pushop.trmanager:
488 pushop.trmanager.close()
488 pushop.trmanager.close()
489 finally:
489 finally:
490 if pushop.trmanager:
490 if pushop.trmanager:
491 pushop.trmanager.release()
491 pushop.trmanager.release()
492 if locallock is not None:
492 if locallock is not None:
493 locallock.release()
493 locallock.release()
494 if localwlock is not None:
494 if localwlock is not None:
495 localwlock.release()
495 localwlock.release()
496
496
497 return pushop
497 return pushop
498
498
# Ordered list of discovery step names; steps run in registration order.
pushdiscoveryorder = []

# Mapping from step name to the implementing function.
#
# Kept public so extensions can wrap individual steps if necessary.
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """Decorator registering a function as a pre-push discovery step.

    The function is recorded in the name -> function mapping and its name
    appended to the ordered step list, so registration order is preserved
    (this may matter).

    Only use this decorator for a brand new step; to wrap a step from an
    extension, modify the pushdiscoverymapping dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register

def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
528
528
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Figure out which changesets need to be pushed.

    Stores the outgoing set, the remote heads and the incoming flag on the
    push operation for later steps to consume."""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
541
541
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)

    Fills ``pushop.outdatedphases`` (revisions to turn public if the
    changeset push succeeds) and ``pushop.fallbackoutdatedphases``
    (revisions to turn public even if the changeset push fails)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # pheads: heads known public on the remote; droots: remote draft roots
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing server: only sync phases for changesets that are
        # already public locally
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
590
590
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Compute the obsolescence markers relevant to the push.

    Only runs when marker exchange is enabled locally, the local obsstore
    is non-empty and the remote advertises the 'obsolete' namespace."""
    repo = pushop.repo
    if (not obsolete.isenabled(repo, obsolete.exchangeopt)
        or not repo.obsstore
        or 'obsolete' not in pushop.remote.listkeys('namespaces')):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
601
601
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compute which bookmark updates should be sent to the remote.

    Appends ``(name, old-remote-value, new-value)`` triples (hex or empty
    string) to ``pushop.outbookmarks`` and sets ``pushop.bkresult`` to 2
    when an explicitly requested bookmark exists on neither side."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only move bookmarks that point into the pushed subset of history
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # resolve aliases (e.g. '.') for bookmarks named on the command line
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # a missing bookmark side is represented by None; keep it as-is
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        # convert binary nodes back to hex for the pushkey wire protocol
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # bookmarks that advanced locally: pushed automatically when their
    # target is part of the pushed revisions
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark (only pushed when explicitly requested)
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark (only when explicitly requested)
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
666
666
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before they are bundled.

    Returns False when there is nothing to push.  Unless the push is
    forced, aborts if any outgoing head is obsolete or troubled, then
    runs the head-change checks in ``discovery.checkheads``."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

        discovery.checkheads(pushop)
    return True
697
697
# Names of the steps generating parts of an outgoing bundle2, in order
# (order matters).
b2partsgenorder = []

# Mapping from step name to the part-generating function.
#
# Kept public so extensions can wrap individual steps if necessary.
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generation step.

    The function is recorded in the name -> function mapping.  Its name is
    appended to the ordered step list, unless ``idx`` gives an explicit
    insertion position.  Registration order may matter.

    Only use this decorator for a brand new step; to wrap a step from an
    extension, modify the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
724
724
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if pushop.force:
        # forced push: skip the server-side head check entirely
        return
    bundler.newpart('check:heads', data=iter(pushop.remoteheads))
732
732
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version: highest version supported by both
    # sides, defaulting to '01' when the remote advertises none
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
773
773
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one pushkey part per head that must be turned public on the
    remote and returns a reply handler that reports any update the server
    rejected or ignored."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use "'pushkey' not in" for readability (PEP 8) and consistency with
    # the identical capability check in _pushb2bookmarks
    if 'pushkey' not in b2caps:
        # remote cannot handle pushkey parts; leave 'phases' out of
        # stepsdone so a later step can sync phases another way
        return
    pushop.stepsdone.add('phases')
    # (part id, node) for every pushkey part generated below; used to map
    # server replies and failures back to the node being published
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about every phase update the server ignored or refused"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
814
814
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part when both sides share a version."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format in common with the remote; leave the step for
        # another exchange mechanism
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
826
826
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one pushkey part per outgoing bookmark change and returns a reply
    handler that reports the outcome of each of them."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        # remote cannot handle pushkey parts through bundle2
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) for every part generated below;
    # used to map server replies and failures back to a readable message
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty 'old' means creation, empty 'new' means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
878
878
879
879
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    # run every registered part generator; a generator may return a
    # callable that will process the server reply for its part
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the remote aborted while processing one of our parts; relay
            # its message (and hint) before aborting locally
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate to the failure callback registered by the part generator
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
928
928
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Non-bundle2 path: skipped when the 'changesets' step was already
    performed (e.g. by _pushb2ctx)."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
972
972
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Pulls the remote's phase state via listkeys, applies the appropriate
    phase movements locally, then pushes local phase changes back to the
    remote through individual pushkey calls (legacy fallback path).
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            # nothing was pushed; use the pre-push fallback computation
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1028
1028
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # a transaction manager is available: we hold the lock, so the
        # phase boundary can actually be advanced
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    phasestr = phases.phasenames[phase]
    repo = pushop.repo
    wouldmove = [node for node in nodes if phase < repo[node].phase()]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1045
1045
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Markers are escaped into one or more pushkey payloads and sent with
    individual pushkey calls. No-op if the step already ran (e.g. bundle2).
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        # split markers into size-bounded pushkey payloads keyed dump0..dumpN
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        # warn (but do not abort) if any pushkey call reported failure
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
1064
1064
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # changegroup push failed, or bookmarks already handled (bundle2)
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for name, oldnode, newnode in pushop.outbookmarks:
        # classify the operation for user-facing messages
        if not oldnode:
            action = 'export'
        elif not newnode:
            action = 'delete'
        else:
            action = 'update'
        okmsg, failmsg = bookmsgmap[action]
        if remote.pushkey('bookmarks', name, oldnode, newnode):
            ui.status(okmsg % name)
        else:
            ui.warn(failmsg % name)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
1086
1086
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is usable unless explicitly forced back to bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1157
1157
class transactionmanager(object):
    """Manage the life cycle of a single repository transaction.

    The transaction is constructed lazily on first request, and the
    appropriate hooks fire when it is closed.
    """

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            desc = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(desc)
            # record provenance for hooks fired at transaction close
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1187
1187
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # abort early if the destination lacks required repo features
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        # lock ordering: wlock before lock, released in reverse
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step below is a no-op if already recorded in stepsdone
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1243
1243
# list of steps to perform discovery before pull (execution order)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1251
1251
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list. Beware that decorated
    functions are added in definition order (this may matter).

    Only use this decorator for a new step; to wrap an existing step from
    an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1267
1267
def _pulldiscovery(pullop):
    """Run all registered discovery steps, in registration order."""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1273
1273
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmarks already provided by the caller
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1287
1287
1288
1288
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point.

    Fills in ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not included in a remote head, we won't detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally: treat as common if discovery missed it
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head is known locally: nothing left to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1326
1326
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the getbundle keyword arguments, fetches and processes the
    bundle, then applies phase/bookmark listkeys replies carried in it.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # a marker version is shared with the remote: pull them in-band
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        # collapse the per-part changegroup return codes into one result
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1397
1399
1398 def _pullbundle2extraprepare(pullop, kwargs):
1400 def _pullbundle2extraprepare(pullop, kwargs):
1399 """hook function so that extensions can extend the getbundle call"""
1401 """hook function so that extensions can extend the getbundle call"""
1400 pass
1402 pass
1401
1403
1402 def _pullchangeset(pullop):
1404 def _pullchangeset(pullop):
1403 """pull changeset from unbundle into the local repo"""
1405 """pull changeset from unbundle into the local repo"""
1404 # We delay the open of the transaction as late as possible so we
1406 # We delay the open of the transaction as late as possible so we
1405 # don't open transaction for nothing or you break future useful
1407 # don't open transaction for nothing or you break future useful
1406 # rollback call
1408 # rollback call
1407 if 'changegroup' in pullop.stepsdone:
1409 if 'changegroup' in pullop.stepsdone:
1408 return
1410 return
1409 pullop.stepsdone.add('changegroup')
1411 pullop.stepsdone.add('changegroup')
1410 if not pullop.fetch:
1412 if not pullop.fetch:
1411 pullop.repo.ui.status(_("no changes found\n"))
1413 pullop.repo.ui.status(_("no changes found\n"))
1412 pullop.cgresult = 0
1414 pullop.cgresult = 0
1413 return
1415 return
1414 pullop.gettransaction()
1416 pullop.gettransaction()
1415 if pullop.heads is None and list(pullop.common) == [nullid]:
1417 if pullop.heads is None and list(pullop.common) == [nullid]:
1416 pullop.repo.ui.status(_("requesting all changes\n"))
1418 pullop.repo.ui.status(_("requesting all changes\n"))
1417 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1419 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1418 # issue1320, avoid a race if remote changed after discovery
1420 # issue1320, avoid a race if remote changed after discovery
1419 pullop.heads = pullop.rheads
1421 pullop.heads = pullop.rheads
1420
1422
1421 if pullop.remote.capable('getbundle'):
1423 if pullop.remote.capable('getbundle'):
1422 # TODO: get bundlecaps from remote
1424 # TODO: get bundlecaps from remote
1423 cg = pullop.remote.getbundle('pull', common=pullop.common,
1425 cg = pullop.remote.getbundle('pull', common=pullop.common,
1424 heads=pullop.heads or pullop.rheads)
1426 heads=pullop.heads or pullop.rheads)
1425 elif pullop.heads is None:
1427 elif pullop.heads is None:
1426 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1428 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1427 elif not pullop.remote.capable('changegroupsubset'):
1429 elif not pullop.remote.capable('changegroupsubset'):
1428 raise error.Abort(_("partial pull cannot be done because "
1430 raise error.Abort(_("partial pull cannot be done because "
1429 "other repository doesn't support "
1431 "other repository doesn't support "
1430 "changegroupsubset."))
1432 "changegroupsubset."))
1431 else:
1433 else:
1432 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1434 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1433 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1435 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1434
1436
1435 def _pullphase(pullop):
1437 def _pullphase(pullop):
1436 # Get remote phases data from remote
1438 # Get remote phases data from remote
1437 if 'phases' in pullop.stepsdone:
1439 if 'phases' in pullop.stepsdone:
1438 return
1440 return
1439 remotephases = pullop.remote.listkeys('phases')
1441 remotephases = pullop.remote.listkeys('phases')
1440 _pullapplyphases(pullop, remotephases)
1442 _pullapplyphases(pullop, remotephases)
1441
1443
1442 def _pullapplyphases(pullop, remotephases):
1444 def _pullapplyphases(pullop, remotephases):
1443 """apply phase movement from observed remote state"""
1445 """apply phase movement from observed remote state"""
1444 if 'phases' in pullop.stepsdone:
1446 if 'phases' in pullop.stepsdone:
1445 return
1447 return
1446 pullop.stepsdone.add('phases')
1448 pullop.stepsdone.add('phases')
1447 publishing = bool(remotephases.get('publishing', False))
1449 publishing = bool(remotephases.get('publishing', False))
1448 if remotephases and not publishing:
1450 if remotephases and not publishing:
1449 # remote is new and non-publishing
1451 # remote is new and non-publishing
1450 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1452 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1451 pullop.pulledsubset,
1453 pullop.pulledsubset,
1452 remotephases)
1454 remotephases)
1453 dheads = pullop.pulledsubset
1455 dheads = pullop.pulledsubset
1454 else:
1456 else:
1455 # Remote is old or publishing all common changesets
1457 # Remote is old or publishing all common changesets
1456 # should be seen as public
1458 # should be seen as public
1457 pheads = pullop.pulledsubset
1459 pheads = pullop.pulledsubset
1458 dheads = []
1460 dheads = []
1459 unfi = pullop.repo.unfiltered()
1461 unfi = pullop.repo.unfiltered()
1460 phase = unfi._phasecache.phase
1462 phase = unfi._phasecache.phase
1461 rev = unfi.changelog.nodemap.get
1463 rev = unfi.changelog.nodemap.get
1462 public = phases.public
1464 public = phases.public
1463 draft = phases.draft
1465 draft = phases.draft
1464
1466
1465 # exclude changesets already public locally and update the others
1467 # exclude changesets already public locally and update the others
1466 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1468 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1467 if pheads:
1469 if pheads:
1468 tr = pullop.gettransaction()
1470 tr = pullop.gettransaction()
1469 phases.advanceboundary(pullop.repo, tr, public, pheads)
1471 phases.advanceboundary(pullop.repo, tr, public, pheads)
1470
1472
1471 # exclude changesets already draft locally and update the others
1473 # exclude changesets already draft locally and update the others
1472 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1474 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1473 if dheads:
1475 if dheads:
1474 tr = pullop.gettransaction()
1476 tr = pullop.gettransaction()
1475 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1477 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1476
1478
1477 def _pullbookmarks(pullop):
1479 def _pullbookmarks(pullop):
1478 """process the remote bookmark information to update the local one"""
1480 """process the remote bookmark information to update the local one"""
1479 if 'bookmarks' in pullop.stepsdone:
1481 if 'bookmarks' in pullop.stepsdone:
1480 return
1482 return
1481 pullop.stepsdone.add('bookmarks')
1483 pullop.stepsdone.add('bookmarks')
1482 repo = pullop.repo
1484 repo = pullop.repo
1483 remotebookmarks = pullop.remotebookmarks
1485 remotebookmarks = pullop.remotebookmarks
1484 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1486 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1485 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1487 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1486 pullop.remote.url(),
1488 pullop.remote.url(),
1487 pullop.gettransaction,
1489 pullop.gettransaction,
1488 explicit=pullop.explicitbookmarks)
1490 explicit=pullop.explicitbookmarks)
1489
1491
1490 def _pullobsolete(pullop):
1492 def _pullobsolete(pullop):
1491 """utility function to pull obsolete markers from a remote
1493 """utility function to pull obsolete markers from a remote
1492
1494
1493 The `gettransaction` is function that return the pull transaction, creating
1495 The `gettransaction` is function that return the pull transaction, creating
1494 one if necessary. We return the transaction to inform the calling code that
1496 one if necessary. We return the transaction to inform the calling code that
1495 a new transaction have been created (when applicable).
1497 a new transaction have been created (when applicable).
1496
1498
1497 Exists mostly to allow overriding for experimentation purpose"""
1499 Exists mostly to allow overriding for experimentation purpose"""
1498 if 'obsmarkers' in pullop.stepsdone:
1500 if 'obsmarkers' in pullop.stepsdone:
1499 return
1501 return
1500 pullop.stepsdone.add('obsmarkers')
1502 pullop.stepsdone.add('obsmarkers')
1501 tr = None
1503 tr = None
1502 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1504 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1503 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1505 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1504 remoteobs = pullop.remote.listkeys('obsolete')
1506 remoteobs = pullop.remote.listkeys('obsolete')
1505 if 'dump0' in remoteobs:
1507 if 'dump0' in remoteobs:
1506 tr = pullop.gettransaction()
1508 tr = pullop.gettransaction()
1507 markers = []
1509 markers = []
1508 for key in sorted(remoteobs, reverse=True):
1510 for key in sorted(remoteobs, reverse=True):
1509 if key.startswith('dump'):
1511 if key.startswith('dump'):
1510 data = util.b85decode(remoteobs[key])
1512 data = util.b85decode(remoteobs[key])
1511 version, newmarks = obsolete._readmarkers(data)
1513 version, newmarks = obsolete._readmarkers(data)
1512 markers += newmarks
1514 markers += newmarks
1513 if markers:
1515 if markers:
1514 pullop.repo.obsstore.add(tr, markers)
1516 pullop.repo.obsstore.add(tr, markers)
1515 pullop.repo.invalidatevolatilesets()
1517 pullop.repo.invalidatevolatilesets()
1516 return tr
1518 return tr
1517
1519
1518 def caps20to10(repo):
1520 def caps20to10(repo):
1519 """return a set with appropriate options to use bundle20 during getbundle"""
1521 """return a set with appropriate options to use bundle20 during getbundle"""
1520 caps = set(['HG20'])
1522 caps = set(['HG20'])
1521 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1523 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1522 caps.add('bundle2=' + urlreq.quote(capsblob))
1524 caps.add('bundle2=' + urlreq.quote(capsblob))
1523 return caps
1525 return caps
1524
1526
1525 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1527 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1526 getbundle2partsorder = []
1528 getbundle2partsorder = []
1527
1529
1528 # Mapping between step name and function
1530 # Mapping between step name and function
1529 #
1531 #
1530 # This exists to help extensions wrap steps if necessary
1532 # This exists to help extensions wrap steps if necessary
1531 getbundle2partsmapping = {}
1533 getbundle2partsmapping = {}
1532
1534
1533 def getbundle2partsgenerator(stepname, idx=None):
1535 def getbundle2partsgenerator(stepname, idx=None):
1534 """decorator for function generating bundle2 part for getbundle
1536 """decorator for function generating bundle2 part for getbundle
1535
1537
1536 The function is added to the step -> function mapping and appended to the
1538 The function is added to the step -> function mapping and appended to the
1537 list of steps. Beware that decorated functions will be added in order
1539 list of steps. Beware that decorated functions will be added in order
1538 (this may matter).
1540 (this may matter).
1539
1541
1540 You can only use this decorator for new steps, if you want to wrap a step
1542 You can only use this decorator for new steps, if you want to wrap a step
1541 from an extension, attack the getbundle2partsmapping dictionary directly."""
1543 from an extension, attack the getbundle2partsmapping dictionary directly."""
1542 def dec(func):
1544 def dec(func):
1543 assert stepname not in getbundle2partsmapping
1545 assert stepname not in getbundle2partsmapping
1544 getbundle2partsmapping[stepname] = func
1546 getbundle2partsmapping[stepname] = func
1545 if idx is None:
1547 if idx is None:
1546 getbundle2partsorder.append(stepname)
1548 getbundle2partsorder.append(stepname)
1547 else:
1549 else:
1548 getbundle2partsorder.insert(idx, stepname)
1550 getbundle2partsorder.insert(idx, stepname)
1549 return func
1551 return func
1550 return dec
1552 return dec
1551
1553
1552 def bundle2requested(bundlecaps):
1554 def bundle2requested(bundlecaps):
1553 if bundlecaps is not None:
1555 if bundlecaps is not None:
1554 return any(cap.startswith('HG2') for cap in bundlecaps)
1556 return any(cap.startswith('HG2') for cap in bundlecaps)
1555 return False
1557 return False
1556
1558
1557 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1559 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1558 **kwargs):
1560 **kwargs):
1559 """Return chunks constituting a bundle's raw data.
1561 """Return chunks constituting a bundle's raw data.
1560
1562
1561 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1563 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1562 passed.
1564 passed.
1563
1565
1564 Returns an iterator over raw chunks (of varying sizes).
1566 Returns an iterator over raw chunks (of varying sizes).
1565 """
1567 """
1566 usebundle2 = bundle2requested(bundlecaps)
1568 usebundle2 = bundle2requested(bundlecaps)
1567 # bundle10 case
1569 # bundle10 case
1568 if not usebundle2:
1570 if not usebundle2:
1569 if bundlecaps and not kwargs.get('cg', True):
1571 if bundlecaps and not kwargs.get('cg', True):
1570 raise ValueError(_('request for bundle10 must include changegroup'))
1572 raise ValueError(_('request for bundle10 must include changegroup'))
1571
1573
1572 if kwargs:
1574 if kwargs:
1573 raise ValueError(_('unsupported getbundle arguments: %s')
1575 raise ValueError(_('unsupported getbundle arguments: %s')
1574 % ', '.join(sorted(kwargs.keys())))
1576 % ', '.join(sorted(kwargs.keys())))
1575 outgoing = _computeoutgoing(repo, heads, common)
1577 outgoing = _computeoutgoing(repo, heads, common)
1576 bundler = changegroup.getbundler('01', repo)
1578 bundler = changegroup.getbundler('01', repo)
1577 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1579 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1578
1580
1579 # bundle20 case
1581 # bundle20 case
1580 b2caps = {}
1582 b2caps = {}
1581 for bcaps in bundlecaps:
1583 for bcaps in bundlecaps:
1582 if bcaps.startswith('bundle2='):
1584 if bcaps.startswith('bundle2='):
1583 blob = urlreq.unquote(bcaps[len('bundle2='):])
1585 blob = urlreq.unquote(bcaps[len('bundle2='):])
1584 b2caps.update(bundle2.decodecaps(blob))
1586 b2caps.update(bundle2.decodecaps(blob))
1585 bundler = bundle2.bundle20(repo.ui, b2caps)
1587 bundler = bundle2.bundle20(repo.ui, b2caps)
1586
1588
1587 kwargs['heads'] = heads
1589 kwargs['heads'] = heads
1588 kwargs['common'] = common
1590 kwargs['common'] = common
1589
1591
1590 for name in getbundle2partsorder:
1592 for name in getbundle2partsorder:
1591 func = getbundle2partsmapping[name]
1593 func = getbundle2partsmapping[name]
1592 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1594 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1593 **kwargs)
1595 **kwargs)
1594
1596
1595 return bundler.getchunks()
1597 return bundler.getchunks()
1596
1598
1597 @getbundle2partsgenerator('changegroup')
1599 @getbundle2partsgenerator('changegroup')
1598 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1600 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1599 b2caps=None, heads=None, common=None, **kwargs):
1601 b2caps=None, heads=None, common=None, **kwargs):
1600 """add a changegroup part to the requested bundle"""
1602 """add a changegroup part to the requested bundle"""
1601 cg = None
1603 cg = None
1602 if kwargs.get('cg', True):
1604 if kwargs.get('cg', True):
1603 # build changegroup bundle here.
1605 # build changegroup bundle here.
1604 version = '01'
1606 version = '01'
1605 cgversions = b2caps.get('changegroup')
1607 cgversions = b2caps.get('changegroup')
1606 if cgversions: # 3.1 and 3.2 ship with an empty value
1608 if cgversions: # 3.1 and 3.2 ship with an empty value
1607 cgversions = [v for v in cgversions
1609 cgversions = [v for v in cgversions
1608 if v in changegroup.supportedoutgoingversions(repo)]
1610 if v in changegroup.supportedoutgoingversions(repo)]
1609 if not cgversions:
1611 if not cgversions:
1610 raise ValueError(_('no common changegroup version'))
1612 raise ValueError(_('no common changegroup version'))
1611 version = max(cgversions)
1613 version = max(cgversions)
1612 outgoing = _computeoutgoing(repo, heads, common)
1614 outgoing = _computeoutgoing(repo, heads, common)
1613 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1615 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1614 version=version)
1616 version=version)
1615
1617
1616 if cg:
1618 if cg:
1617 part = bundler.newpart('changegroup', data=cg)
1619 part = bundler.newpart('changegroup', data=cg)
1618 if cgversions:
1620 if cgversions:
1619 part.addparam('version', version)
1621 part.addparam('version', version)
1620 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1622 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1621 if 'treemanifest' in repo.requirements:
1623 if 'treemanifest' in repo.requirements:
1622 part.addparam('treemanifest', '1')
1624 part.addparam('treemanifest', '1')
1623
1625
1624 @getbundle2partsgenerator('listkeys')
1626 @getbundle2partsgenerator('listkeys')
1625 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1627 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1626 b2caps=None, **kwargs):
1628 b2caps=None, **kwargs):
1627 """add parts containing listkeys namespaces to the requested bundle"""
1629 """add parts containing listkeys namespaces to the requested bundle"""
1628 listkeys = kwargs.get('listkeys', ())
1630 listkeys = kwargs.get('listkeys', ())
1629 for namespace in listkeys:
1631 for namespace in listkeys:
1630 part = bundler.newpart('listkeys')
1632 part = bundler.newpart('listkeys')
1631 part.addparam('namespace', namespace)
1633 part.addparam('namespace', namespace)
1632 keys = repo.listkeys(namespace).items()
1634 keys = repo.listkeys(namespace).items()
1633 part.data = pushkey.encodekeys(keys)
1635 part.data = pushkey.encodekeys(keys)
1634
1636
1635 @getbundle2partsgenerator('obsmarkers')
1637 @getbundle2partsgenerator('obsmarkers')
1636 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1638 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1637 b2caps=None, heads=None, **kwargs):
1639 b2caps=None, heads=None, **kwargs):
1638 """add an obsolescence markers part to the requested bundle"""
1640 """add an obsolescence markers part to the requested bundle"""
1639 if kwargs.get('obsmarkers', False):
1641 if kwargs.get('obsmarkers', False):
1640 if heads is None:
1642 if heads is None:
1641 heads = repo.heads()
1643 heads = repo.heads()
1642 subset = [c.node() for c in repo.set('::%ln', heads)]
1644 subset = [c.node() for c in repo.set('::%ln', heads)]
1643 markers = repo.obsstore.relevantmarkers(subset)
1645 markers = repo.obsstore.relevantmarkers(subset)
1644 markers = sorted(markers)
1646 markers = sorted(markers)
1645 buildobsmarkerspart(bundler, markers)
1647 buildobsmarkerspart(bundler, markers)
1646
1648
1647 @getbundle2partsgenerator('hgtagsfnodes')
1649 @getbundle2partsgenerator('hgtagsfnodes')
1648 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1650 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1649 b2caps=None, heads=None, common=None,
1651 b2caps=None, heads=None, common=None,
1650 **kwargs):
1652 **kwargs):
1651 """Transfer the .hgtags filenodes mapping.
1653 """Transfer the .hgtags filenodes mapping.
1652
1654
1653 Only values for heads in this bundle will be transferred.
1655 Only values for heads in this bundle will be transferred.
1654
1656
1655 The part data consists of pairs of 20 byte changeset node and .hgtags
1657 The part data consists of pairs of 20 byte changeset node and .hgtags
1656 filenodes raw values.
1658 filenodes raw values.
1657 """
1659 """
1658 # Don't send unless:
1660 # Don't send unless:
1659 # - changeset are being exchanged,
1661 # - changeset are being exchanged,
1660 # - the client supports it.
1662 # - the client supports it.
1661 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1663 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1662 return
1664 return
1663
1665
1664 outgoing = _computeoutgoing(repo, heads, common)
1666 outgoing = _computeoutgoing(repo, heads, common)
1665 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1667 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1666
1668
1667 def _getbookmarks(repo, **kwargs):
1669 def _getbookmarks(repo, **kwargs):
1668 """Returns bookmark to node mapping.
1670 """Returns bookmark to node mapping.
1669
1671
1670 This function is primarily used to generate `bookmarks` bundle2 part.
1672 This function is primarily used to generate `bookmarks` bundle2 part.
1671 It is a separate function in order to make it easy to wrap it
1673 It is a separate function in order to make it easy to wrap it
1672 in extensions. Passing `kwargs` to the function makes it easy to
1674 in extensions. Passing `kwargs` to the function makes it easy to
1673 add new parameters in extensions.
1675 add new parameters in extensions.
1674 """
1676 """
1675
1677
1676 return dict(bookmod.listbinbookmarks(repo))
1678 return dict(bookmod.listbinbookmarks(repo))
1677
1679
1678 def check_heads(repo, their_heads, context):
1680 def check_heads(repo, their_heads, context):
1679 """check if the heads of a repo have been modified
1681 """check if the heads of a repo have been modified
1680
1682
1681 Used by peer for unbundling.
1683 Used by peer for unbundling.
1682 """
1684 """
1683 heads = repo.heads()
1685 heads = repo.heads()
1684 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1686 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1685 if not (their_heads == ['force'] or their_heads == heads or
1687 if not (their_heads == ['force'] or their_heads == heads or
1686 their_heads == ['hashed', heads_hash]):
1688 their_heads == ['hashed', heads_hash]):
1687 # someone else committed/pushed/unbundled while we
1689 # someone else committed/pushed/unbundled while we
1688 # were transferring data
1690 # were transferring data
1689 raise error.PushRaced('repository changed while %s - '
1691 raise error.PushRaced('repository changed while %s - '
1690 'please try again' % context)
1692 'please try again' % context)
1691
1693
1692 def unbundle(repo, cg, heads, source, url):
1694 def unbundle(repo, cg, heads, source, url):
1693 """Apply a bundle to a repo.
1695 """Apply a bundle to a repo.
1694
1696
1695 this function makes sure the repo is locked during the application and have
1697 this function makes sure the repo is locked during the application and have
1696 mechanism to check that no push race occurred between the creation of the
1698 mechanism to check that no push race occurred between the creation of the
1697 bundle and its application.
1699 bundle and its application.
1698
1700
1699 If the push was raced as PushRaced exception is raised."""
1701 If the push was raced as PushRaced exception is raised."""
1700 r = 0
1702 r = 0
1701 # need a transaction when processing a bundle2 stream
1703 # need a transaction when processing a bundle2 stream
1702 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1704 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1703 lockandtr = [None, None, None]
1705 lockandtr = [None, None, None]
1704 recordout = None
1706 recordout = None
1705 # quick fix for output mismatch with bundle2 in 3.4
1707 # quick fix for output mismatch with bundle2 in 3.4
1706 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1708 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1707 False)
1709 False)
1708 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1710 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1709 captureoutput = True
1711 captureoutput = True
1710 try:
1712 try:
1711 # note: outside bundle1, 'heads' is expected to be empty and this
1713 # note: outside bundle1, 'heads' is expected to be empty and this
1712 # 'check_heads' call wil be a no-op
1714 # 'check_heads' call wil be a no-op
1713 check_heads(repo, heads, 'uploading changes')
1715 check_heads(repo, heads, 'uploading changes')
1714 # push can proceed
1716 # push can proceed
1715 if not util.safehasattr(cg, 'params'):
1717 if not util.safehasattr(cg, 'params'):
1716 # legacy case: bundle1 (changegroup 01)
1718 # legacy case: bundle1 (changegroup 01)
1717 lockandtr[1] = repo.lock()
1719 lockandtr[1] = repo.lock()
1718 r = cg.apply(repo, source, url)
1720 r = cg.apply(repo, source, url)
1719 else:
1721 else:
1720 r = None
1722 r = None
1721 try:
1723 try:
1722 def gettransaction():
1724 def gettransaction():
1723 if not lockandtr[2]:
1725 if not lockandtr[2]:
1724 lockandtr[0] = repo.wlock()
1726 lockandtr[0] = repo.wlock()
1725 lockandtr[1] = repo.lock()
1727 lockandtr[1] = repo.lock()
1726 lockandtr[2] = repo.transaction(source)
1728 lockandtr[2] = repo.transaction(source)
1727 lockandtr[2].hookargs['source'] = source
1729 lockandtr[2].hookargs['source'] = source
1728 lockandtr[2].hookargs['url'] = url
1730 lockandtr[2].hookargs['url'] = url
1729 lockandtr[2].hookargs['bundle2'] = '1'
1731 lockandtr[2].hookargs['bundle2'] = '1'
1730 return lockandtr[2]
1732 return lockandtr[2]
1731
1733
1732 # Do greedy locking by default until we're satisfied with lazy
1734 # Do greedy locking by default until we're satisfied with lazy
1733 # locking.
1735 # locking.
1734 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1736 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1735 gettransaction()
1737 gettransaction()
1736
1738
1737 op = bundle2.bundleoperation(repo, gettransaction,
1739 op = bundle2.bundleoperation(repo, gettransaction,
1738 captureoutput=captureoutput)
1740 captureoutput=captureoutput)
1739 try:
1741 try:
1740 op = bundle2.processbundle(repo, cg, op=op)
1742 op = bundle2.processbundle(repo, cg, op=op)
1741 finally:
1743 finally:
1742 r = op.reply
1744 r = op.reply
1743 if captureoutput and r is not None:
1745 if captureoutput and r is not None:
1744 repo.ui.pushbuffer(error=True, subproc=True)
1746 repo.ui.pushbuffer(error=True, subproc=True)
1745 def recordout(output):
1747 def recordout(output):
1746 r.newpart('output', data=output, mandatory=False)
1748 r.newpart('output', data=output, mandatory=False)
1747 if lockandtr[2] is not None:
1749 if lockandtr[2] is not None:
1748 lockandtr[2].close()
1750 lockandtr[2].close()
1749 except BaseException as exc:
1751 except BaseException as exc:
1750 exc.duringunbundle2 = True
1752 exc.duringunbundle2 = True
1751 if captureoutput and r is not None:
1753 if captureoutput and r is not None:
1752 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1754 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1753 def recordout(output):
1755 def recordout(output):
1754 part = bundle2.bundlepart('output', data=output,
1756 part = bundle2.bundlepart('output', data=output,
1755 mandatory=False)
1757 mandatory=False)
1756 parts.append(part)
1758 parts.append(part)
1757 raise
1759 raise
1758 finally:
1760 finally:
1759 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1761 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1760 if recordout is not None:
1762 if recordout is not None:
1761 recordout(repo.ui.popbuffer())
1763 recordout(repo.ui.popbuffer())
1762 return r
1764 return r
1763
1765
1764 def _maybeapplyclonebundle(pullop):
1766 def _maybeapplyclonebundle(pullop):
1765 """Apply a clone bundle from a remote, if possible."""
1767 """Apply a clone bundle from a remote, if possible."""
1766
1768
1767 repo = pullop.repo
1769 repo = pullop.repo
1768 remote = pullop.remote
1770 remote = pullop.remote
1769
1771
1770 if not repo.ui.configbool('ui', 'clonebundles', True):
1772 if not repo.ui.configbool('ui', 'clonebundles', True):
1771 return
1773 return
1772
1774
1773 # Only run if local repo is empty.
1775 # Only run if local repo is empty.
1774 if len(repo):
1776 if len(repo):
1775 return
1777 return
1776
1778
1777 if pullop.heads:
1779 if pullop.heads:
1778 return
1780 return
1779
1781
1780 if not remote.capable('clonebundles'):
1782 if not remote.capable('clonebundles'):
1781 return
1783 return
1782
1784
1783 res = remote._call('clonebundles')
1785 res = remote._call('clonebundles')
1784
1786
1785 # If we call the wire protocol command, that's good enough to record the
1787 # If we call the wire protocol command, that's good enough to record the
1786 # attempt.
1788 # attempt.
1787 pullop.clonebundleattempted = True
1789 pullop.clonebundleattempted = True
1788
1790
1789 entries = parseclonebundlesmanifest(repo, res)
1791 entries = parseclonebundlesmanifest(repo, res)
1790 if not entries:
1792 if not entries:
1791 repo.ui.note(_('no clone bundles available on remote; '
1793 repo.ui.note(_('no clone bundles available on remote; '
1792 'falling back to regular clone\n'))
1794 'falling back to regular clone\n'))
1793 return
1795 return
1794
1796
1795 entries = filterclonebundleentries(repo, entries)
1797 entries = filterclonebundleentries(repo, entries)
1796 if not entries:
1798 if not entries:
1797 # There is a thundering herd concern here. However, if a server
1799 # There is a thundering herd concern here. However, if a server
1798 # operator doesn't advertise bundles appropriate for its clients,
1800 # operator doesn't advertise bundles appropriate for its clients,
1799 # they deserve what's coming. Furthermore, from a client's
1801 # they deserve what's coming. Furthermore, from a client's
1800 # perspective, no automatic fallback would mean not being able to
1802 # perspective, no automatic fallback would mean not being able to
1801 # clone!
1803 # clone!
1802 repo.ui.warn(_('no compatible clone bundles available on server; '
1804 repo.ui.warn(_('no compatible clone bundles available on server; '
1803 'falling back to regular clone\n'))
1805 'falling back to regular clone\n'))
1804 repo.ui.warn(_('(you may want to report this to the server '
1806 repo.ui.warn(_('(you may want to report this to the server '
1805 'operator)\n'))
1807 'operator)\n'))
1806 return
1808 return
1807
1809
1808 entries = sortclonebundleentries(repo.ui, entries)
1810 entries = sortclonebundleentries(repo.ui, entries)
1809
1811
1810 url = entries[0]['URL']
1812 url = entries[0]['URL']
1811 repo.ui.status(_('applying clone bundle from %s\n') % url)
1813 repo.ui.status(_('applying clone bundle from %s\n') % url)
1812 if trypullbundlefromurl(repo.ui, repo, url):
1814 if trypullbundlefromurl(repo.ui, repo, url):
1813 repo.ui.status(_('finished applying clone bundle\n'))
1815 repo.ui.status(_('finished applying clone bundle\n'))
1814 # Bundle failed.
1816 # Bundle failed.
1815 #
1817 #
1816 # We abort by default to avoid the thundering herd of
1818 # We abort by default to avoid the thundering herd of
1817 # clients flooding a server that was expecting expensive
1819 # clients flooding a server that was expecting expensive
1818 # clone load to be offloaded.
1820 # clone load to be offloaded.
1819 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1821 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1820 repo.ui.warn(_('falling back to normal clone\n'))
1822 repo.ui.warn(_('falling back to normal clone\n'))
1821 else:
1823 else:
1822 raise error.Abort(_('error applying bundle'),
1824 raise error.Abort(_('error applying bundle'),
1823 hint=_('if this error persists, consider contacting '
1825 hint=_('if this error persists, consider contacting '
1824 'the server operator or disable clone '
1826 'the server operator or disable clone '
1825 'bundles via '
1827 'bundles via '
1826 '"--config ui.clonebundles=false"'))
1828 '"--config ui.clonebundles=false"'))
1827
1829
def parseclonebundlesmanifest(repo, s):
    """Parse the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding
    to the entry's URL; every other key is an attribute for that entry.
    """
    manifest = []
    for line in s.splitlines():
        tokens = line.split()
        if not tokens:
            continue

        entry = {'URL': tokens[0]}
        for pair in tokens[1:]:
            name, rawvalue = pair.split('=', 1)
            name = urlreq.unquote(name)
            rawvalue = urlreq.unquote(rawvalue)
            entry[name] = rawvalue

            # Split BUNDLESPEC into its components so that client-side
            # preferences can target a single component (e.g. just the
            # compression) instead of the full spec string.
            if name == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(
                        repo, rawvalue, externalnames=True)
                    entry['COMPRESSION'] = comp
                    entry['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        manifest.append(entry)

    return manifest
1863
1865
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def compatible(entry):
        # An unparseable or unsupported BUNDLESPEC disqualifies the entry.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                return False

        # Entries demanding SNI are unusable without SNI support locally.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if compatible(entry)]
1896
1898
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the preference list in order; the first attribute that
        # distinguishes the two entries decides the ordering.
        for attr, wanted in self.prefers:
            ours = self.value.get(attr)
            theirs = other.value.get(attr)

            # An entry carrying exactly the preferred value sorts before
            # one that lacks the attribute entirely.
            if theirs is None and ours is not None and ours == wanted:
                return -1
            if ours is None and theirs is not None and theirs == wanted:
                return 1

            # With the attribute missing on either side there is nothing
            # left to compare; identical values are a tie. Next attribute.
            if ours is None or theirs is None or ours == theirs:
                continue

            # Both present and different: an exact match on the preferred
            # value comes first.
            if ours == wanted:
                return -1
            if theirs == wanted:
                return 1

        # No preference broke the tie; preserve the original (index) order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
def sortclonebundleentries(ui, entries):
    """Return entries sorted by the ``ui.clonebundleprefers`` config.

    When no preferences are configured, the manifest order is preserved
    (a shallow copy of ``entries`` is returned).
    """
    raw = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not raw:
        return list(entries)

    # Each preference is a "KEY=VALUE" string; split each once into a pair.
    prefpairs = [pref.split('=', 1) for pref in raw]

    wrapped = [clonebundleentry(entry, prefpairs) for entry in entries]
    wrapped.sort()
    return [w.value for w in wrapped]
1970
1972
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied and the
    transaction closed.  Returns False when fetching failed with an
    HTTP or URL error (a warning is printed and the transaction is
    rolled back).  Any other exception propagates to the caller.
    """
    # Hold the repo lock for the whole fetch-and-apply; released in the
    # outer finally even if applying raises.
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                # readbundle sniffs the stream and hands back one of the
                # three bundle flavors dispatched on below.
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    # bundle2: processed part by part within our transaction.
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    # Stream clone: raw file data applied directly.
                    cg.apply(repo)
                else:
                    # Legacy changegroup (cg1/cg2).
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urlerr.httperror as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urlerr.urlerror as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            # Only reached on a fetch error above; the finally below
            # releases (rolls back) the still-open transaction.
            return False
        finally:
            tr.release()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now