exchange: reject new compression engines for v1 bundles (issue5506)...
Gregory Szorc, r31473:ffed3bf5, stable
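The commit adds a guard in parsebundlespec (below) so that a spec pairing a
post-4.1 compression engine with the v1 bundle format is rejected up front,
instead of producing a bundle that old clients cannot read. A minimal sketch
of the resulting behavior, assuming a Mercurial of this vintage with zstd
support compiled in (the repo argument is unused on this strict-mode path):

    from mercurial import error, exchange

    # v1 predates the pluggable compression engine API, so only the
    # historical engines (gzip, bzip2, none) remain valid for it; newer
    # engines such as zstd must target v2.
    try:
        exchange.parsebundlespec(None, 'zstd-v1')
    except error.UnsupportedBundleSpecification as e:
        print(e)  # compression engine zstd is not supported on v1 bundles

    exchange.parsebundlespec(None, 'zstd-v2')  # accepted: ('ZS', '02', {})

Freezing the v1 engine set in a module-level constant, rather than deriving
it from the engine registry, is what keeps the set stable as new engines are
registered.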
@@ -1,2011 +1,2020 @@
 # exchange.py - utility to exchange data between repos.
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
 )
 from . import (
     base85,
     bookmarks as bookmod,
     bundle2,
     changegroup,
     discovery,
     error,
     lock as lockmod,
     obsolete,
     phases,
     pushkey,
     scmutil,
     sslutil,
     streamclone,
     tags,
     url as urlmod,
     util,
 )
 
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {'v1': '01',
                          'v2': '02',
                          'packed1': 's1',
                          'bundle2': '02', #legacy
                         }
 
+# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
+_bundlespecv1compengines = set(['gzip', 'bzip2', 'none'])
+
 def parsebundlespec(repo, spec, strict=True, externalnames=False):
     """Parse a bundle string specification into parts.
 
     Bundle specifications denote a well-defined bundle/exchange format.
     The content of a given specification should not change over time in
     order to ensure that bundles produced by a newer version of Mercurial are
     readable from an older version.
 
     The string currently has the form:
 
        <compression>-<type>[;<parameter0>[;<parameter1>]]
 
     Where <compression> is one of the supported compression formats
     and <type> is (currently) a version string. A ";" can follow the type and
     all text afterwards is interpreted as URI encoded, ";" delimited key=value
     pairs.
 
     If ``strict`` is True (the default) <compression> is required. Otherwise,
     it is optional.
 
     If ``externalnames`` is False (the default), the human-centric names will
     be converted to their internal representation.
 
     Returns a 3-tuple of (compression, version, parameters). Compression will
     be ``None`` if not in strict mode and a compression isn't defined.
 
     An ``InvalidBundleSpecification`` is raised when the specification is
     not syntactically well formed.
 
     An ``UnsupportedBundleSpecification`` is raised when the compression or
     bundle type/version is not recognized.
 
     Note: this function will likely eventually return a more complex data
     structure, including bundle2 part information.
     """
     def parseparams(s):
         if ';' not in s:
             return s, {}
 
         params = {}
         version, paramstr = s.split(';', 1)
 
         for p in paramstr.split(';'):
             if '=' not in p:
                 raise error.InvalidBundleSpecification(
                     _('invalid bundle specification: '
                       'missing "=" in parameter: %s') % p)
 
             key, value = p.split('=', 1)
             key = urlreq.unquote(key)
             value = urlreq.unquote(value)
             params[key] = value
 
         return version, params
 
 
     if strict and '-' not in spec:
         raise error.InvalidBundleSpecification(
             _('invalid bundle specification; '
               'must be prefixed with compression: %s') % spec)
 
     if '-' in spec:
         compression, version = spec.split('-', 1)
 
         if compression not in util.compengines.supportedbundlenames:
             raise error.UnsupportedBundleSpecification(
                 _('%s compression is not supported') % compression)
 
         version, params = parseparams(version)
 
         if version not in _bundlespeccgversions:
             raise error.UnsupportedBundleSpecification(
                 _('%s is not a recognized bundle version') % version)
     else:
         # Value could be just the compression or just the version, in which
         # case some defaults are assumed (but only when not in strict mode).
         assert not strict
 
         spec, params = parseparams(spec)
 
         if spec in util.compengines.supportedbundlenames:
             compression = spec
             version = 'v1'
             if 'generaldelta' in repo.requirements:
                 version = 'v2'
         elif spec in _bundlespeccgversions:
             if spec == 'packed1':
                 compression = 'none'
             else:
                 compression = 'bzip2'
             version = spec
         else:
             raise error.UnsupportedBundleSpecification(
                 _('%s is not a recognized bundle specification') % spec)
 
+    # Bundle version 1 only supports a known set of compression engines.
+    if version == 'v1' and compression not in _bundlespecv1compengines:
+        raise error.UnsupportedBundleSpecification(
+            _('compression engine %s is not supported on v1 bundles') %
+            compression)
+
     # The specification for packed1 can optionally declare the data formats
     # required to apply it. If we see this metadata, compare against what the
     # repo supports and error if the bundle isn't compatible.
     if version == 'packed1' and 'requirements' in params:
         requirements = set(params['requirements'].split(','))
         missingreqs = requirements - repo.supportedformats
         if missingreqs:
             raise error.UnsupportedBundleSpecification(
                 _('missing support for repository features: %s') %
                 ', '.join(sorted(missingreqs)))
 
     if not externalnames:
         engine = util.compengines.forbundlename(compression)
         compression = engine.bundletype()[1]
         version = _bundlespeccgversions[version]
     return compression, version, params
 
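For illustration, two more parses this function accepts; a sketch assuming
the stock engine-name table in util.compengines (the repo object is only
consulted on the non-strict path):

    from mercurial import exchange

    # text after ';' is URI-decoded into key=value parameters
    exchange.parsebundlespec(None, 'none-v2;cg.version=02')
    # -> ('UN', '02', {'cg.version': '02'})

    # non-strict mode fills in defaults: a bare engine name implies v1,
    # or v2 when the repo uses generaldelta
    exchange.parsebundlespec(repo, 'gzip', strict=False)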
 def readbundle(ui, fh, fname, vfs=None):
     header = changegroup.readexactly(fh, 4)
 
     alg = None
     if not fname:
         fname = "stream"
         if not header.startswith('HG') and header.startswith('\0'):
             fh = changegroup.headerlessfixup(fh, header)
             header = "HG10"
             alg = 'UN'
     elif vfs:
         fname = vfs.join(fname)
 
     magic, version = header[0:2], header[2:4]
 
     if magic != 'HG':
         raise error.Abort(_('%s: not a Mercurial bundle') % fname)
     if version == '10':
         if alg is None:
             alg = changegroup.readexactly(fh, 2)
         return changegroup.cg1unpacker(fh, alg)
     elif version.startswith('2'):
         return bundle2.getunbundler(ui, fh, magicstring=magic + version)
     elif version == 'S1':
         return streamclone.streamcloneapplier(fh)
     else:
         raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
 
 def getbundlespec(ui, fh):
     """Infer the bundlespec from a bundle file handle.
 
     The input file handle is seeked and the original seek position is not
     restored.
     """
     def speccompression(alg):
         try:
             return util.compengines.forbundletype(alg).bundletype()[0]
         except KeyError:
             return None
 
     b = readbundle(ui, fh, None)
     if isinstance(b, changegroup.cg1unpacker):
         alg = b._type
         if alg == '_truncatedBZ':
             alg = 'BZ'
         comp = speccompression(alg)
         if not comp:
             raise error.Abort(_('unknown compression algorithm: %s') % alg)
         return '%s-v1' % comp
     elif isinstance(b, bundle2.unbundle20):
         if 'Compression' in b.params:
             comp = speccompression(b.params['Compression'])
             if not comp:
                 raise error.Abort(_('unknown compression algorithm: %s') % comp)
         else:
             comp = 'none'
 
         version = None
         for part in b.iterparts():
             if part.type == 'changegroup':
                 version = part.params['version']
                 if version in ('01', '02'):
                     version = 'v2'
                 else:
                     raise error.Abort(_('changegroup version %s does not have '
                                         'a known bundlespec') % version,
                                       hint=_('try upgrading your Mercurial '
                                              'client'))
 
         if not version:
             raise error.Abort(_('could not identify changegroup version in '
                                 'bundle'))
 
         return '%s-%s' % (comp, version)
     elif isinstance(b, streamclone.streamcloneapplier):
         requirements = streamclone.readbundle1header(fh)[2]
         params = 'requirements=%s' % ','.join(sorted(requirements))
         return 'none-packed1;%s' % urlreq.quote(params)
     else:
         raise error.Abort(_('unknown bundle type: %s') % b)
 
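A short usage sketch (the ui object and file name are placeholders):

    from mercurial import exchange

    # the inferred spec round-trips through parsebundlespec, e.g.
    # 'bzip2-v1', 'zstd-v2', or 'none-packed1;requirements%3Drevlogv1'
    # for stream clone bundles
    with open('dump.hg', 'rb') as fh:
        spec = exchange.getbundlespec(ui, fh)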
 def buildobsmarkerspart(bundler, markers):
     """add an obsmarker part to the bundler with <markers>
 
     No part is created if markers is empty.
     Raises ValueError if the bundler doesn't support any known obsmarker format.
     """
     if markers:
         remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
         version = obsolete.commonversion(remoteversions)
         if version is None:
             raise ValueError('bundler does not support common obsmarker format')
         stream = obsolete.encodemarkers(markers, True, version=version)
         return bundler.newpart('obsmarkers', data=stream)
     return None
 
 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
     and a set of heads.
 
     This is a separate function so extensions can have access to
     the logic.
 
     Returns a discovery.outgoing object.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(repo, common, heads)
 
 def _forcebundle1(op):
     """return true if a pull/push must use bundle1
 
     This function is used to allow testing of the older bundle version"""
     ui = op.repo.ui
     forcebundle1 = False
     # The goal of this config is to allow developers to choose the bundle
     # version used during exchange. This is especially handy during tests.
     # Value is a list of bundle versions to be picked from, highest version
     # should be used.
     #
     # developer config: devel.legacy.exchange
     exchange = ui.configlist('devel', 'legacy.exchange')
     forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
     return forcebundle1 or not op.remote.capable('bundle2')
 
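A sketch of exercising this developer knob programmatically, e.g. from a
test or an extension (setconfig is the in-process equivalent of an hgrc
entry; the 'test' source label is arbitrary):

    # same effect as:
    #   [devel]
    #   legacy.exchange = bundle1
    repo.ui.setconfig('devel', 'legacy.exchange', 'bundle1', 'test')
    # pushes and pulls through this repo now negotiate bundle1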
 class pushoperation(object):
     """An object that represents a single push operation
 
     Its purpose is to carry push-related state and very common operations.
 
     A new pushoperation should be created at the beginning of each push and
     discarded afterward.
     """
 
     def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                  bookmarks=()):
         # repo we push from
         self.repo = repo
         self.ui = repo.ui
         # repo we push to
         self.remote = remote
         # force option provided
         self.force = force
         # revs to be pushed (None is "all")
         self.revs = revs
         # bookmarks explicitly pushed
         self.bookmarks = bookmarks
         # allow push of new branch
         self.newbranch = newbranch
         # did a local lock get acquired?
         self.locallocked = None
         # steps already performed
         # (used to check what steps have already been performed through bundle2)
         self.stepsdone = set()
         # Integer version of the changegroup push result
         # - None means nothing to push
         # - 0 means HTTP error
         # - 1 means we pushed and remote head count is unchanged *or*
         #   we have outgoing changesets but refused to push
         # - other values as described by addchangegroup()
         self.cgresult = None
         # Boolean value for the bookmark push
         self.bkresult = None
         # discovery.outgoing object (contains common and outgoing data)
         self.outgoing = None
         # all remote heads before the push
         self.remoteheads = None
         # testable as a boolean indicating if any nodes are missing locally.
         self.incoming = None
         # phase changes that must be pushed alongside the changesets
         self.outdatedphases = None
         # phase changes that must be pushed if the changeset push fails
         self.fallbackoutdatedphases = None
         # outgoing obsmarkers
         self.outobsmarkers = set()
         # outgoing bookmarks
         self.outbookmarks = []
         # transaction manager
         self.trmanager = None
         # map { pushkey partid -> callback handling failure}
         # used to handle exception from mandatory pushkey part failure
         self.pkfailcb = {}
 
     @util.propertycache
     def futureheads(self):
         """future remote heads if the changeset push succeeds"""
         return self.outgoing.missingheads
 
     @util.propertycache
     def fallbackheads(self):
         """future remote heads if the changeset push fails"""
         if self.revs is None:
             # no target to push, all common heads are relevant
             return self.outgoing.commonheads
         unfi = self.repo.unfiltered()
         # I want cheads = heads(::missingheads and ::commonheads)
         # (missingheads is revs with secret changeset filtered out)
         #
         # This can be expressed as:
         #     cheads = ( (missingheads and ::commonheads)
         #              + (commonheads and ::missingheads)
         #              )
         #
         # while trying to push we already computed the following:
         #     common = (::commonheads)
         #     missing = ((commonheads::missingheads) - commonheads)
         #
         # We can pick:
         # * missingheads part of common (::commonheads)
         common = self.outgoing.common
         nm = self.repo.changelog.nodemap
         cheads = [node for node in self.revs if nm[node] in common]
         # and
         # * commonheads parents on missing
         revset = unfi.set('%ln and parents(roots(%ln))',
                           self.outgoing.commonheads,
                           self.outgoing.missing)
         cheads.extend(c.node() for c in revset)
         return cheads
 
     @property
     def commonheads(self):
         """set of all common heads after changeset bundle push"""
         if self.cgresult:
             return self.futureheads
         else:
             return self.fallbackheads
 
 # mapping of messages used when pushing bookmarks
 bookmsgmap = {'update': (_("updating bookmark %s\n"),
                          _('updating bookmark %s failed!\n')),
               'export': (_("exporting bookmark %s\n"),
                          _('exporting bookmark %s failed!\n')),
               'delete': (_("deleting remote bookmark %s\n"),
                          _('deleting remote bookmark %s failed!\n')),
               }
 
 
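For orientation, a hedged sketch of driving a push programmatically; the
repo object and the configured 'default' path are assumed to exist:

    from mercurial import exchange, hg

    remote = hg.peer(repo.ui, {}, repo.ui.expandpath('default'))
    pushop = exchange.push(repo, remote, newbranch=True)
    # pushop.cgresult and pushop.bkresult carry the outcomes documented
    # in the pushoperation class above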
 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
          opargs=None):
     '''Push outgoing changesets (limited by revs) from a local
     repository to remote. Return an integer:
       - None means nothing to push
       - 0 means HTTP error
       - 1 means we pushed and remote head count is unchanged *or*
         we have outgoing changesets but refused to push
       - other values as described by addchangegroup()
     '''
     if opargs is None:
         opargs = {}
     pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                            **opargs)
     if pushop.remote.local():
         missing = (set(pushop.repo.requirements)
                    - pushop.remote.local().supported)
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise error.Abort(msg)
 
     # there are two ways to push to a remote repo:
     #
     # addchangegroup assumes local user can lock remote
     # repo (local filesystem, old ssh servers).
     #
     # unbundle assumes local user cannot lock remote repo (new ssh
     # servers, http servers).
 
     if not pushop.remote.canpush():
         raise error.Abort(_("destination does not support push"))
     # get local lock as we might write phase data
     localwlock = locallock = None
     try:
         # bundle2 push may receive a reply bundle touching bookmarks or other
         # things requiring the wlock. Take it now to ensure proper ordering.
         maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
         if (not _forcebundle1(pushop)) and maypushback:
             localwlock = pushop.repo.wlock()
         locallock = pushop.repo.lock()
         pushop.locallocked = True
     except IOError as err:
         pushop.locallocked = False
         if err.errno != errno.EACCES:
             raise
         # source repo cannot be locked.
         # We do not abort the push, but just disable the local phase
         # synchronisation.
         msg = 'cannot lock source repository: %s\n' % err
         pushop.ui.debug(msg)
     try:
         if pushop.locallocked:
             pushop.trmanager = transactionmanager(pushop.repo,
                                                   'push-response',
                                                   pushop.remote.url())
         pushop.repo.checkpush(pushop)
         lock = None
         unbundle = pushop.remote.capable('unbundle')
         if not unbundle:
             lock = pushop.remote.lock()
         try:
             _pushdiscovery(pushop)
             if not _forcebundle1(pushop):
                 _pushbundle2(pushop)
             _pushchangeset(pushop)
             _pushsyncphase(pushop)
             _pushobsolete(pushop)
             _pushbookmark(pushop)
         finally:
             if lock is not None:
                 lock.release()
         if pushop.trmanager:
             pushop.trmanager.close()
     finally:
         if pushop.trmanager:
             pushop.trmanager.release()
         if locallock is not None:
             locallock.release()
         if localwlock is not None:
             localwlock.release()
 
     return pushop
 
 # list of steps to perform discovery before push
 pushdiscoveryorder = []
 
 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 pushdiscoverymapping = {}
 
 def pushdiscovery(stepname):
     """decorator for functions performing discovery before push
 
     The function is added to the step -> function mapping and appended to the
     list of steps. Beware that decorated functions will be added in order
     (this may matter).
 
     You can only use this decorator for a new step; if you want to wrap a step
     from an extension, change the pushdiscoverymapping dictionary directly."""
     def dec(func):
         assert stepname not in pushdiscoverymapping
         pushdiscoverymapping[stepname] = func
         pushdiscoveryorder.append(stepname)
         return func
     return dec
 
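As the docstring says, extensions register additional steps through this
decorator; a sketch with a hypothetical step name:

    from mercurial import exchange

    @exchange.pushdiscovery('mystep')
    def _pushdiscoverymystep(pushop):
        # appended after the built-in steps; they run in registration order
        pushop.ui.debug('running mystep discovery\n')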
 def _pushdiscovery(pushop):
     """Run all discovery steps"""
     for stepname in pushdiscoveryorder:
         step = pushdiscoverymapping[stepname]
         step(pushop)
 
 @pushdiscovery('changeset')
 def _pushdiscoverychangeset(pushop):
     """discover the changesets that need to be pushed"""
     fci = discovery.findcommonincoming
     commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
     common, inc, remoteheads = commoninc
     fco = discovery.findcommonoutgoing
     outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                    commoninc=commoninc, force=pushop.force)
     pushop.outgoing = outgoing
     pushop.remoteheads = remoteheads
     pushop.incoming = inc
 
 @pushdiscovery('phase')
 def _pushdiscoveryphase(pushop):
     """discover the phases that need to be pushed
 
     (computed for both the success and failure cases of the changeset push)"""
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
     remotephases = pushop.remote.listkeys('phases')
     publishing = remotephases.get('publishing', False)
     if (pushop.ui.configbool('ui', '_usedassubrepo', False)
         and remotephases    # server supports phases
         and not pushop.outgoing.missing # no changesets to be pushed
         and publishing):
         # When:
         # - this is a subrepo push
         # - and the remote supports phases
         # - and no changesets are to be pushed
         # - and the remote is publishing
         # We may be in issue 3871 case!
         # We drop the phase synchronisation that is otherwise done as a
         # courtesy to publish changesets that are possibly still draft
         # on the remote.
         remotephases = {'publishing': 'True'}
     ana = phases.analyzeremotephases(pushop.repo,
                                      pushop.fallbackheads,
                                      remotephases)
     pheads, droots = ana
     extracond = ''
     if not publishing:
         extracond = ' and public()'
     revset = 'heads((%%ln::%%ln) %s)' % extracond
     # Get the list of all revs that are draft on the remote but public here.
     # XXX Beware that the revset breaks if droots is not strictly
     # XXX roots; we may want to ensure it is, but that is costly
     fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
     if not outgoing.missing:
         future = fallback
     else:
         # add the changesets we are going to push as draft
         #
         # should not be necessary for a publishing server, but because of an
         # issue fixed in xxxxx we have to do it anyway.
         fdroots = list(unfi.set('roots(%ln + %ln::)',
                                 outgoing.missing, droots))
         fdroots = [f.node() for f in fdroots]
         future = list(unfi.set(revset, fdroots, pushop.futureheads))
     pushop.outdatedphases = future
     pushop.fallbackoutdatedphases = fallback
 
 @pushdiscovery('obsmarker')
 def _pushdiscoveryobsmarkers(pushop):
     if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
         and pushop.repo.obsstore
         and 'obsolete' in pushop.remote.listkeys('namespaces')):
         repo = pushop.repo
         # very naive computation, which can be quite expensive on a big repo.
         # However: evolution is currently slow on them anyway.
         nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
         pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
 
 @pushdiscovery('bookmarks')
 def _pushdiscoverybookmarks(pushop):
     ui = pushop.ui
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     ui.debug("checking for updated bookmarks\n")
     ancestors = ()
     if pushop.revs:
         revnums = map(repo.changelog.rev, pushop.revs)
         ancestors = repo.changelog.ancestors(revnums, inclusive=True)
     remotebookmark = remote.listkeys('bookmarks')
 
     explicit = set([repo._bookmarks.expandname(bookmark)
                     for bookmark in pushop.bookmarks])
 
     remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
     comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
 
     def safehex(x):
         if x is None:
             return x
         return hex(x)
 
     def hexifycompbookmarks(bookmarks):
         for b, scid, dcid in bookmarks:
             yield b, safehex(scid), safehex(dcid)
 
     comp = [hexifycompbookmarks(marks) for marks in comp]
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
 
     for b, scid, dcid in advsrc:
         if b in explicit:
             explicit.remove(b)
         if not ancestors or repo[scid].rev() in ancestors:
             pushop.outbookmarks.append((b, dcid, scid))
     # search for added bookmarks
     for b, scid, dcid in addsrc:
         if b in explicit:
             explicit.remove(b)
             pushop.outbookmarks.append((b, '', scid))
     # search for overwritten bookmarks
     for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
         if b in explicit:
             explicit.remove(b)
             pushop.outbookmarks.append((b, dcid, scid))
     # search for bookmarks to delete
     for b, scid, dcid in adddst:
         if b in explicit:
             explicit.remove(b)
             # treat as "deleted locally"
             pushop.outbookmarks.append((b, dcid, ''))
     # identical bookmarks shouldn't get reported
     for b, scid, dcid in same:
         if b in explicit:
             explicit.remove(b)
 
     if explicit:
         explicit = sorted(explicit)
         # we should probably list all of them
         ui.warn(_('bookmark %s does not exist on the local '
                   'or remote repository!\n') % explicit[0])
         pushop.bkresult = 2
 
     pushop.outbookmarks.sort()
 
 def _pushcheckoutgoing(pushop):
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
     if not outgoing.missing:
         # nothing to push
         scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
         return False
     # something to push
     if not pushop.force:
         # if repo.obsstore == False --> no obsolete
         # then, save the iteration
         if unfi.obsstore:
             # these messages are here for the 80-char limit
             mso = _("push includes obsolete changeset: %s!")
             mst = {"unstable": _("push includes unstable changeset: %s!"),
                    "bumped": _("push includes bumped changeset: %s!"),
                    "divergent": _("push includes divergent changeset: %s!")}
             # If we are to push and there is at least one
             # obsolete or unstable changeset in missing, at
             # least one of the missing heads will be obsolete or
             # unstable. So checking heads only is ok
             for node in outgoing.missingheads:
                 ctx = unfi[node]
                 if ctx.obsolete():
                     raise error.Abort(mso % ctx)
                 elif ctx.troubled():
                     raise error.Abort(mst[ctx.troubles()[0]] % ctx)
 
     discovery.checkheads(pushop)
     return True
 
 # List of names of steps to perform for an outgoing bundle2, order matters.
 b2partsgenorder = []
 
 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 b2partsgenmapping = {}
 
 def b2partsgenerator(stepname, idx=None):
     """decorator for functions generating bundle2 parts
 
     The function is added to the step -> function mapping and appended to the
     list of steps. Beware that decorated functions will be added in order
     (this may matter).
 
     You can only use this decorator for new steps; if you want to wrap a step
     from an extension, change the b2partsgenmapping dictionary directly."""
     def dec(func):
         assert stepname not in b2partsgenmapping
         b2partsgenmapping[stepname] = func
         if idx is None:
             b2partsgenorder.append(stepname)
         else:
             b2partsgenorder.insert(idx, stepname)
         return func
     return dec
 
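A sketch of an extension-provided part generator (names hypothetical);
returning a callable would register it as a reply handler, mirroring the
built-in generators below:

    from mercurial import exchange

    @exchange.b2partsgenerator('my-part', idx=0)  # idx=0: run first
    def _pushb2mypart(pushop, bundler):
        if 'my-part' in pushop.stepsdone:
            return
        pushop.stepsdone.add('my-part')
        # advisory part: receivers that do not know it may ignore it
        bundler.newpart('output', data='hello from my-part\n',
                        mandatory=False)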
714 def _pushb2ctxcheckheads(pushop, bundler):
723 def _pushb2ctxcheckheads(pushop, bundler):
715 """Generate race condition checking parts
724 """Generate race condition checking parts
716
725
717 Exists as an independent function to aid extensions
726 Exists as an independent function to aid extensions
718 """
727 """
719 if not pushop.force:
728 if not pushop.force:
720 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
729 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
721
730
722 @b2partsgenerator('changeset')
731 @b2partsgenerator('changeset')
723 def _pushb2ctx(pushop, bundler):
732 def _pushb2ctx(pushop, bundler):
724 """handle changegroup push through bundle2
733 """handle changegroup push through bundle2
725
734
726 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
735 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
727 """
736 """
728 if 'changesets' in pushop.stepsdone:
737 if 'changesets' in pushop.stepsdone:
729 return
738 return
730 pushop.stepsdone.add('changesets')
739 pushop.stepsdone.add('changesets')
731 # Send known heads to the server for race detection.
740 # Send known heads to the server for race detection.
732 if not _pushcheckoutgoing(pushop):
741 if not _pushcheckoutgoing(pushop):
733 return
742 return
734 pushop.repo.prepushoutgoinghooks(pushop)
743 pushop.repo.prepushoutgoinghooks(pushop)
735
744
736 _pushb2ctxcheckheads(pushop, bundler)
745 _pushb2ctxcheckheads(pushop, bundler)
737
746
738 b2caps = bundle2.bundle2caps(pushop.remote)
747 b2caps = bundle2.bundle2caps(pushop.remote)
739 version = '01'
748 version = '01'
740 cgversions = b2caps.get('changegroup')
749 cgversions = b2caps.get('changegroup')
741 if cgversions: # 3.1 and 3.2 ship with an empty value
750 if cgversions: # 3.1 and 3.2 ship with an empty value
742 cgversions = [v for v in cgversions
751 cgversions = [v for v in cgversions
743 if v in changegroup.supportedoutgoingversions(
752 if v in changegroup.supportedoutgoingversions(
744 pushop.repo)]
753 pushop.repo)]
745 if not cgversions:
754 if not cgversions:
746 raise ValueError(_('no common changegroup version'))
755 raise ValueError(_('no common changegroup version'))
747 version = max(cgversions)
756 version = max(cgversions)
748 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
757 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
749 pushop.outgoing,
758 pushop.outgoing,
750 version=version)
759 version=version)
751 cgpart = bundler.newpart('changegroup', data=cg)
760 cgpart = bundler.newpart('changegroup', data=cg)
752 if cgversions:
761 if cgversions:
753 cgpart.addparam('version', version)
762 cgpart.addparam('version', version)
754 if 'treemanifest' in pushop.repo.requirements:
763 if 'treemanifest' in pushop.repo.requirements:
755 cgpart.addparam('treemanifest', '1')
764 cgpart.addparam('treemanifest', '1')
756 def handlereply(op):
765 def handlereply(op):
757 """extract addchangegroup returns from server reply"""
766 """extract addchangegroup returns from server reply"""
758 cgreplies = op.records.getreplies(cgpart.id)
767 cgreplies = op.records.getreplies(cgpart.id)
759 assert len(cgreplies['changegroup']) == 1
768 assert len(cgreplies['changegroup']) == 1
760 pushop.cgresult = cgreplies['changegroup'][0]['return']
769 pushop.cgresult = cgreplies['changegroup'][0]['return']
761 return handlereply
770 return handlereply
762
771
763 @b2partsgenerator('phase')
772 @b2partsgenerator('phase')
764 def _pushb2phases(pushop, bundler):
773 def _pushb2phases(pushop, bundler):
765 """handle phase push through bundle2"""
774 """handle phase push through bundle2"""
766 if 'phases' in pushop.stepsdone:
775 if 'phases' in pushop.stepsdone:
767 return
776 return
768 b2caps = bundle2.bundle2caps(pushop.remote)
777 b2caps = bundle2.bundle2caps(pushop.remote)
769 if not 'pushkey' in b2caps:
778 if not 'pushkey' in b2caps:
770 return
779 return
771 pushop.stepsdone.add('phases')
780 pushop.stepsdone.add('phases')
772 part2node = []
781 part2node = []
773
782
774 def handlefailure(pushop, exc):
783 def handlefailure(pushop, exc):
775 targetid = int(exc.partid)
784 targetid = int(exc.partid)
776 for partid, node in part2node:
785 for partid, node in part2node:
777 if partid == targetid:
786 if partid == targetid:
778 raise error.Abort(_('updating %s to public failed') % node)
787 raise error.Abort(_('updating %s to public failed') % node)
779
788
780 enc = pushkey.encode
789 enc = pushkey.encode
781 for newremotehead in pushop.outdatedphases:
790 for newremotehead in pushop.outdatedphases:
782 part = bundler.newpart('pushkey')
791 part = bundler.newpart('pushkey')
783 part.addparam('namespace', enc('phases'))
792 part.addparam('namespace', enc('phases'))
784 part.addparam('key', enc(newremotehead.hex()))
793 part.addparam('key', enc(newremotehead.hex()))
785 part.addparam('old', enc(str(phases.draft)))
794 part.addparam('old', enc(str(phases.draft)))
786 part.addparam('new', enc(str(phases.public)))
795 part.addparam('new', enc(str(phases.public)))
787 part2node.append((part.id, newremotehead))
796 part2node.append((part.id, newremotehead))
788 pushop.pkfailcb[part.id] = handlefailure
797 pushop.pkfailcb[part.id] = handlefailure
789
798
790 def handlereply(op):
799 def handlereply(op):
791 for partid, node in part2node:
800 for partid, node in part2node:
792 partrep = op.records.getreplies(partid)
801 partrep = op.records.getreplies(partid)
793 results = partrep['pushkey']
802 results = partrep['pushkey']
794 assert len(results) <= 1
803 assert len(results) <= 1
795 msg = None
804 msg = None
796 if not results:
805 if not results:
797 msg = _('server ignored update of %s to public!\n') % node
806 msg = _('server ignored update of %s to public!\n') % node
798 elif not int(results[0]['return']):
807 elif not int(results[0]['return']):
799 msg = _('updating %s to public failed!\n') % node
808 msg = _('updating %s to public failed!\n') % node
800 if msg is not None:
809 if msg is not None:
801 pushop.ui.warn(msg)
810 pushop.ui.warn(msg)
802 return handlereply
811 return handlereply
803
812
804 @b2partsgenerator('obsmarkers')
813 @b2partsgenerator('obsmarkers')
805 def _pushb2obsmarkers(pushop, bundler):
814 def _pushb2obsmarkers(pushop, bundler):
806 if 'obsmarkers' in pushop.stepsdone:
815 if 'obsmarkers' in pushop.stepsdone:
807 return
816 return
808 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
817 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
809 if obsolete.commonversion(remoteversions) is None:
818 if obsolete.commonversion(remoteversions) is None:
810 return
819 return
811 pushop.stepsdone.add('obsmarkers')
820 pushop.stepsdone.add('obsmarkers')
812 if pushop.outobsmarkers:
821 if pushop.outobsmarkers:
813 markers = sorted(pushop.outobsmarkers)
822 markers = sorted(pushop.outobsmarkers)
814 buildobsmarkerspart(bundler, markers)
823 buildobsmarkerspart(bundler, markers)
815
824
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply


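# A minimal, hypothetical sketch of the parts-generator contract used above:
# register with @b2partsgenerator, guard through pushop.stepsdone, add parts
# to the bundler, and optionally return a reply handler. The part name and
# payload below are illustrative, not part of this module:
#
#     @b2partsgenerator('example-data')
#     def _pushb2exampledata(pushop, bundler):
#         if 'example-data' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add('example-data')
#         bundler.newpart('example-data', data='payload')
#         def handlereply(op):
#             pass  # inspect op.records for the server's replies
#         return handlereply
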
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

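# The pushback capability advertised above is driven by the configuration
# key read in _pushbundle2; a hypothetical invocation enabling
# server-initiated pushback parts would look like:
#
#     hg push --config experimental.bundle2.pushback=True
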
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

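# For reference, the pushkey fallback above transmits the numeric phase
# values from the phases module (public=0, draft=1), so a single call is
# roughly this sketch:
#
#     # move <newremotehead> from draft (1) to public (0) on the remote
#     remote.pushkey('phases', newremotehead.hex(),
#                    str(phases.draft), str(phases.public))
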
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

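# Bookmark pushkey semantics, as a hypothetical sketch: an empty old value
# creates the bookmark ('export') and an empty new value deletes it,
# mirroring the action selection above:
#
#     remote.pushkey('bookmarks', 'feature', '', hex(node))  # export
#     remote.pushkey('bookmarks', 'feature', hex(node), '')  # delete
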
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

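# A hypothetical usage sketch of transactionmanager, following the same
# close/release discipline that pull() uses below:
#
#     trmanager = transactionmanager(repo, 'pull', remote.url())
#     try:
#         tr = trmanager.transaction()  # created lazily, reused afterwards
#         # ... apply data within the transaction ...
#         trmanager.close()
#     finally:
#         trmanager.release()  # rolls back if close() was never reached
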
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop

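# A hypothetical caller-side sketch; ``remote`` would come from a peer
# factory defined outside this module, and the returned operation carries
# the result state:
#
#     pullop = pull(repo, remote, heads=None, bookmarks=['@'])
#     if pullop.cgresult == 0:
#         repo.ui.status('no changesets were pulled\n')
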
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

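# A hypothetical sketch of registering an extra discovery step through the
# decorator above (the step name is illustrative):
#
#     @pulldiscovery('example:probe')
#     def _pulldiscoveryprobe(pullop):
#         pullop.repo.ui.debug('probing remote before pull\n')
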
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will be extended to handle
    all discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it would end up doing a pathological amount of
        # round trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

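# A hypothetical sketch of an extension extending the getbundle call by
# wrapping the hook above; extensions.wrapfunction lives outside this module
# and the extra argument name is illustrative:
#
#     def _myextraprepare(orig, pullop, kwargs):
#         kwargs['myarg'] = True
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                             _myextraprepare)
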
def _pullchangeset(pullop):
    """pull changesets from the remote into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break future useful rollback calls.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())

def _pullphase(pullop):
    # Get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` function returns the pull transaction, creating one
    if necessary. We return the transaction to inform the calling code that a
    new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

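# The returned set looks roughly like the following sketch; the quoted blob
# is the URL-encoded bundle2 capability string and varies by repository:
#
#     set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'])
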
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

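# A hypothetical sketch of a server-side part generator registered through
# the decorator above; extra request arguments arrive via **kwargs:
#
#     @getbundle2partsgenerator('example-part')
#     def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         if not b2caps or 'example-part' not in b2caps:
#             return
#         bundler.newpart('example-part', data='payload')
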
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return bundler.getchunks()

1589 @getbundle2partsgenerator('changegroup')
1598 @getbundle2partsgenerator('changegroup')
1590 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1599 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1591 b2caps=None, heads=None, common=None, **kwargs):
1600 b2caps=None, heads=None, common=None, **kwargs):
1592 """add a changegroup part to the requested bundle"""
1601 """add a changegroup part to the requested bundle"""
1593 cg = None
1602 cg = None
1594 if kwargs.get('cg', True):
1603 if kwargs.get('cg', True):
1595 # build changegroup bundle here.
1604 # build changegroup bundle here.
1596 version = '01'
1605 version = '01'
1597 cgversions = b2caps.get('changegroup')
1606 cgversions = b2caps.get('changegroup')
1598 if cgversions: # 3.1 and 3.2 ship with an empty value
1607 if cgversions: # 3.1 and 3.2 ship with an empty value
1599 cgversions = [v for v in cgversions
1608 cgversions = [v for v in cgversions
1600 if v in changegroup.supportedoutgoingversions(repo)]
1609 if v in changegroup.supportedoutgoingversions(repo)]
1601 if not cgversions:
1610 if not cgversions:
1602 raise ValueError(_('no common changegroup version'))
1611 raise ValueError(_('no common changegroup version'))
1603 version = max(cgversions)
1612 version = max(cgversions)
1604 outgoing = _computeoutgoing(repo, heads, common)
1613 outgoing = _computeoutgoing(repo, heads, common)
1605 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1614 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1606 bundlecaps=bundlecaps,
1615 bundlecaps=bundlecaps,
1607 version=version)
1616 version=version)
1608
1617
1609 if cg:
1618 if cg:
1610 part = bundler.newpart('changegroup', data=cg)
1619 part = bundler.newpart('changegroup', data=cg)
1611 if cgversions:
1620 if cgversions:
1612 part.addparam('version', version)
1621 part.addparam('version', version)
1613 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1622 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1614 if 'treemanifest' in repo.requirements:
1623 if 'treemanifest' in repo.requirements:
1615 part.addparam('treemanifest', '1')
1624 part.addparam('treemanifest', '1')
1616
1625
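# Illustrative sketch (not part of this module): the version negotiation
# above amounts to intersecting the changegroup versions advertised by the
# client with those the server can emit, then picking the highest. The
# helper name below is hypothetical.
def _negotiatecgversion(clientversions, serverversions):
    """Return the best changegroup version supported by both sides."""
    common = [v for v in clientversions if v in serverversions]
    if not common:
        raise ValueError('no common changegroup version')
    return max(common)
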
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20-byte changeset nodes and raw
    .hgtags filenode values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))

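# Illustrative sketch (not part of this module): a receiver could split the
# raw 'hgtagsfnodes' part data produced above back into pairs, since each
# record is exactly 20 bytes of changeset node followed by 20 bytes of
# .hgtags filenode. The helper name is hypothetical.
def _splithgtagsfnodes(data):
    """Yield (changeset node, .hgtags filenode) pairs from raw part data."""
    assert len(data) % 40 == 0
    for offset in range(0, len(data), 40):
        yield data[offset:offset + 20], data[offset + 20:offset + 40]
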
def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate the `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """

    return dict(bookmod.listbinbookmarks(repo))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

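# Illustrative sketch (not part of this module): the 'hashed' form accepted
# above is just the SHA-1 of the concatenated, sorted binary heads, so a
# client could compute its token like this (helper name hypothetical):
def _hashedheads(heads):
    """Return the digest that check_heads() compares against."""
    return hashlib.sha1(''.join(sorted(heads))).digest()
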
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

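# Illustrative sketch (not part of this module): the lazy-locking pattern in
# unbundle() generalizes to any "create expensive state on first use" hook.
# A mutable list lets the nested callback publish what it created so the
# caller can release it later. Names below are hypothetical.
def _lazytransaction(repo, state, name):
    """Return state[2], creating wlock/lock/transaction on first use."""
    if not state[2]:
        state[0] = repo.wlock()
        state[1] = repo.lock()
        state[2] = repo.transaction(name)
    return state[2]
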
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL; the other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

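# Illustrative example (not part of this module): a manifest line such as
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would parse (assuming gzip-v2 is a valid spec) to roughly:
#
#   {'URL': 'https://example.com/full.hg',
#    'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2',
#    'REQUIRESNI': 'true'}
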
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

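# Illustrative example (not part of this module): with
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
#
# ``prefers`` becomes [['COMPRESSION', 'zstd'], ['VERSION', 'v2']], so entries
# advertising zstd sort first, ties are broken by VERSION, and anything still
# tied keeps its manifest order (the _cmp() above returns 0).
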
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urlerr.httperror as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urlerr.urlerror as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            tr.release()
    finally:
        lock.release()

@@ -1,195 +1,229 @@

  $ cat << EOF >> $HGRCPATH
  > [format]
  > usegeneraldelta=yes
  > EOF

bundle w/o type option

  $ hg init t1
  $ hg init t2
  $ cd t1
  $ echo blablablablabla > file.txt
  $ hg ci -Ama
  adding file.txt
  $ hg log | grep summary
  summary:     a
  $ hg bundle ../b1 ../t2
  searching for changes
  1 changesets found

  $ cd ../t2
  $ hg pull ../b1
  pulling from ../b1
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  (run 'hg update' to get a working copy)
  $ hg up
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg log | grep summary
  summary:     a
  $ cd ..

Unknown compression type is rejected

  $ hg init t3
  $ cd t3
  $ hg -q pull ../b1
  $ hg bundle -a -t unknown out.hg
  abort: unknown is not a recognized bundle specification
  (see 'hg help bundle' for supported values for --type)
  [255]

  $ hg bundle -a -t unknown-v2 out.hg
  abort: unknown compression is not supported
  (see 'hg help bundle' for supported values for --type)
  [255]

  $ cd ..

test bundle types

  $ testbundle() {
  >   echo % test bundle type $1
  >   hg init t$1
  >   cd t1
  >   hg bundle -t $1 ../b$1 ../t$1
  >   f -q -B6 -D ../b$1; echo
  >   cd ../t$1
  >   hg debugbundle ../b$1
  >   hg debugbundle --spec ../b$1
  >   echo
  >   cd ..
  > }

  $ for t in "None" "bzip2" "gzip" "none-v2" "v2" "v1" "gzip-v1"; do
  >   testbundle $t
  > done
  % test bundle type None
  searching for changes
  1 changesets found
  HG20\x00\x00 (esc)
  Stream params: {}
  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  none-v2

  % test bundle type bzip2
  searching for changes
  1 changesets found
  HG20\x00\x00 (esc)
  Stream params: sortdict([('Compression', 'BZ')])
  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  bzip2-v2

  % test bundle type gzip
  searching for changes
  1 changesets found
  HG20\x00\x00 (esc)
  Stream params: sortdict([('Compression', 'GZ')])
  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  gzip-v2

  % test bundle type none-v2
  searching for changes
  1 changesets found
  HG20\x00\x00 (esc)
  Stream params: {}
  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  none-v2

  % test bundle type v2
  searching for changes
  1 changesets found
  HG20\x00\x00 (esc)
  Stream params: sortdict([('Compression', 'BZ')])
  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  bzip2-v2

  % test bundle type v1
  searching for changes
  1 changesets found
  HG10BZ
  c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  bzip2-v1

  % test bundle type gzip-v1
  searching for changes
  1 changesets found
  HG10GZ
  c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  gzip-v1


Compression level can be adjusted for bundle2 bundles

  $ hg init test-complevel
  $ cd test-complevel

  $ cat > file0 << EOF
  > this is a file
  > with some text
  > and some more text
  > and other content
  > EOF
  $ cat > file1 << EOF
  > this is another file
  > with some other content
  > and repeated, repeated, repeated, repeated content
  > EOF
  $ hg -q commit -A -m initial

  $ hg bundle -a -t gzip-v2 gzip-v2.hg
  1 changesets found
  $ f --size gzip-v2.hg
  gzip-v2.hg: size=427

  $ hg --config experimental.bundlecomplevel=1 bundle -a -t gzip-v2 gzip-v2-level1.hg
  1 changesets found
  $ f --size gzip-v2-level1.hg
  gzip-v2-level1.hg: size=435

  $ cd ..

#if zstd

  $ for t in "zstd" "zstd-v2"; do
  >   testbundle $t
  > done
  % test bundle type zstd
  searching for changes
  1 changesets found
  HG20\x00\x00 (esc)
  Stream params: sortdict([('Compression', 'ZS')])
  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  zstd-v2

  % test bundle type zstd-v2
  searching for changes
  1 changesets found
  HG20\x00\x00 (esc)
  Stream params: sortdict([('Compression', 'ZS')])
  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
  zstd-v2


Explicit request for zstd on non-generaldelta repos

  $ hg --config format.usegeneraldelta=false init nogd
  $ hg -q -R nogd pull t1
  $ hg -R nogd bundle -a -t zstd nogd-zstd
  abort: compression engine zstd is not supported on v1 bundles
  (see 'hg help bundle' for supported values for --type)
  [255]

zstd-v1 always fails

  $ hg -R tzstd bundle -a -t zstd-v1 zstd-v1
  abort: compression engine zstd is not supported on v1 bundles
  (see 'hg help bundle' for supported values for --type)
  [255]

#else

zstd is a valid engine but isn't available

  $ hg -R t1 bundle -a -t zstd irrelevant.hg
  abort: compression engine zstd could not be loaded
  [255]

#endif

test garbage file

  $ echo garbage > bgarbage
  $ hg init tgarbage
  $ cd tgarbage
  $ hg pull ../bgarbage
  pulling from ../bgarbage
  abort: ../bgarbage: not a Mercurial bundle
  [255]
  $ cd ..

test invalid bundle type

  $ cd t1
  $ hg bundle -a -t garbage ../bgarbage
  abort: garbage is not a recognized bundle specification
  (see 'hg help bundle' for supported values for --type)
  [255]
  $ cd ..