##// END OF EJS Templates
pushrace: avoid crash on bare push when using concurrent push mode...
marmoute -
r33133:78fc540c default
parent child Browse files
Show More
@@ -1,2012 +1,2013 @@
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 bookmarks as bookmod,
19 bookmarks as bookmod,
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 lock as lockmod,
24 lock as lockmod,
25 obsolete,
25 obsolete,
26 phases,
26 phases,
27 pushkey,
27 pushkey,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sslutil,
30 sslutil,
31 streamclone,
31 streamclone,
32 url as urlmod,
32 url as urlmod,
33 util,
33 util,
34 )
34 )
35
35
# Short aliases for the urllib compatibility shims provided by util.
urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {
    'v1': '01',
    'v2': '02',
    'packed1': 's1',
    'bundle2': '02',  # legacy alias kept for backwards compatibility
}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
48
48
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split ``s`` into (version, params dict). Everything after the
        # first ";" is a ";"-delimited list of URI-encoded key=value pairs.
        if ';' not in s:
            return s, {}

        version, paramstr = s.split(';', 1)
        params = {}
        for entry in paramstr.split(';'):
            if '=' not in entry:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % entry)

            name, value = entry.split('=', 1)
            params[urlreq.unquote(name)] = urlreq.unquote(value)

        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully qualified form: both compression and version are present.
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            # Only a version was given; pick the matching default compression.
            compression = 'none' if spec == 'packed1' else 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        missingreqs = (set(params['requirements'].split(','))
                       - repo.supportedformats)
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                  ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Translate the human-centric names into internal identifiers.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
170
170
def readbundle(ui, fh, fname, vfs=None):
    """Return the appropriate unbundler object for the stream ``fh``.

    The first four bytes are consumed to sniff the bundle type. ``fname``
    is used for error reporting (and, with ``vfs``, path expansion); an
    empty name means the data comes from a stream.
    """
    header = changegroup.readexactly(fh, 4)

    compalg = None
    if not fname:
        fname = "stream"
        # A raw, headerless changegroup: re-attach the bytes we sniffed
        # and treat the data as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compalg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # bundle1: the two-byte compression marker follows the header
        # unless we already fixed one up above.
        if compalg is None:
            compalg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compalg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
198
198
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map an internal bundle compression identifier to its spec name,
        # or None if the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    unbundler = readbundle(ui, fh, None)
    if isinstance(unbundler, changegroup.cg1unpacker):
        alg = unbundler._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(unbundler, bundle2.unbundle20):
        if 'Compression' in unbundler.params:
            comp = speccompression(unbundler.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        # Walk the parts to find the changegroup and derive the version
        # component of the spec from it.
        version = None
        for part in unbundler.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(unbundler, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % unbundler)
251
251
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        # Nothing known in common: everything since the null revision
        # is outgoing.
        common = [nullid]
    else:
        # Discard nodes the local changelog does not know about.
        common = [node for node in common if cl.hasnode(node)]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
270
270
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config is to let developers choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to be picked from, highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    # Dead initial assignment of forcebundle1 removed: the value was
    # unconditionally overwritten by the line below.
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    # Even without the developer override, fall back to bundle1 when the
    # remote does not advertise bundle2 support.
    return forcebundle1 or not op.remote.capable('bundle2')
286
286
class pushoperation(object):
    """State container describing a single push operation.

    Its purpose is to carry push-related state and a handful of very
    common helper computations.

    A fresh pushoperation should be created at the beginning of each push
    and discarded once the push is finished.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # local repository we push from
        self.repo = repo
        self.ui = repo.ui
        # remote repository we push to
        self.remote = remote
        # was --force requested?
        self.force = force
        # revisions to be pushed (None means "all")
        self.revs = revs
        # bookmarks explicitly requested for push
        self.bookmarks = bookmarks
        # is pushing a new branch allowed?
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through
        # bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no push target given, so every common head stays relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(ctx.node() for ctx in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
411
411
412
412
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        # Local-to-local push: verify the destination understands every
        # requirement of the source repository before doing any work.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # There are two ways to push to a remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # Take the local lock as we might write phase data.
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # The source repo cannot be locked. Do not abort the push; just
        # disable the local phase synchronisation instead.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # Only open a reply transaction when we hold the local lock.
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # Old-style push requires holding the remote lock directly.
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if not _forcebundle1(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # Release in reverse acquisition order: transaction, lock, wlock.
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
497
497
# list of steps to perform discovery before push
# (populated, in registration order, by the pushdiscovery() decorator)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
505
505
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    Registers the decorated function under ``stepname`` in
    ``pushdiscoverymapping`` and appends the name to
    ``pushdiscoveryorder``.  Decoration order therefore determines
    execution order, which may matter.

    Only use this for brand new steps; to wrap an existing step from an
    extension, modify the pushdiscoverymapping dictionary directly.
    """
    def register(stepfunc):
        # a step name may only be registered once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = stepfunc
        pushdiscoveryorder.append(stepname)
        return stepfunc
    return register
521
521
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
527
527
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover which changesets need to be pushed

    Stores the outgoing set, the remote heads and the incoming flag on
    the push operation object.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
540
540
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # on a non-publishing server only changesets already public on the
        # remote side are candidates for being marked public locally
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        # nothing new pushed: the fallback computation is also the final one
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # heads to turn public if the changeset push succeeds / if it fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
589
589
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push

    Only runs when exchange is enabled locally, the local obsstore is
    non-empty and the remote advertises the 'obsolete' namespace.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
600
600
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmark updates to be pushed

    Fills ``pushop.outbookmarks`` with ``(name, old-remote-hex, new-hex)``
    tuples ('' stands for "no value", i.e. bookmark creation or deletion).
    Sets ``pushop.bkresult`` to 2 when a bookmark explicitly requested on
    the command line exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to the ancestors of the pushed revisions
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks explicitly listed on the command line (names expanded)
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # hex() chokes on None ("no value on this side"); pass it through
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        # convert binary node ids from comparebookmarks() to hex
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # bookmarks that advanced locally: push only when the new target is in
    # the pushed set (or when no rev restriction applies)
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        # anything left was requested explicitly but found nowhere
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
665
665
def _pushcheckoutgoing(pushop):
    """validate the outgoing set before sending anything

    Returns False when there is nothing to push (after informing the
    user), True when the push can proceed.  Unless --force is given,
    aborts when an outgoing head is obsolete or troubled, and runs the
    head checks from the discovery module.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
696
696
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated by the b2partsgenerator() decorator)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
704
704
def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator step

    The decorated function is stored under ``stepname`` in
    ``b2partsgenmapping``.  When ``idx`` is None the step name is appended
    to ``b2partsgenorder``; otherwise it is inserted at position ``idx``.
    Registration order may matter.

    Only use this for brand new steps; to wrap an existing step from an
    extension, modify the b2partsgenmapping dictionary directly.
    """
    def register(partfunc):
        # each step name may be registered only once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = partfunc
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return partfunc
    return register
723
723
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        # NOTE(review): pushbranchmap is None on a bare push (per the commit
        # message, e.g. pushing to an empty remote in concurrent-push mode);
        # there is then no per-branch data to derive an "updated heads"
        # check from, so fall back to the plain 'check:heads' part.
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    # remote heads we discard or replace with new ones
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
746
747
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the highest changegroup version supported by both sides
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
787
788
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one 'pushkey' part per head that must be turned public on the
    remote, wires a failure callback for each part, and returns a reply
    handler that reports ignored or failed updates.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic membership test, consistent with _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs so callbacks can map a part back to its head
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about pushkey parts the server ignored or rejected"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
828
829
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part when both sides share an obsmarker version

    Skipped when the step already ran or when no obsolescence-marker
    format is understood by both sides.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
840
841
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one 'pushkey' part per entry in ``pushop.outbookmarks``, wires a
    failure callback for each part, and returns a reply handler reporting
    the outcome of every bookmark update.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples for the callbacks below
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old value means creation, empty new value means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """report per-bookmark success or failure from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
892
893
893
894
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let every registered generator add its parts; generators may return a
    # callable that will process the server reply later
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the 'replycaps' part added above is always present, hence <= 1)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                # only open a local transaction when the server may push back
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server aborted while processing one of our parts
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        # dispatch to the failure callback registered for that part, if any
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
942
943
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    out = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # build the changegroup locally
    fastpathok = (pushop.revs is None
                  and not out.excluded
                  and not pushop.repo.changelog.filteredrevs)
    if fastpathok:
        # pushing everything: take the fast path, no race possible on push
        packer = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo, out, packer, 'push',
                                   fastpath=True)
    else:
        cg = changegroup.getchangegroup(pushop.repo, 'push', out,
                                        bundlecaps=bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what revs it must
        # push. once revs transferred, if server finds it has different
        # heads (someone else won commit/push race), server aborts.
        remoteheads = ['force'] if pushop.force else pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
989
990
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
            and remotephases                # server supports phases
            and pushop.cgresult is None     # nothing was pushed
            and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        analysis = phases.analyzeremotephases(pushop.repo, cheads,
                                              remotephases)
        pheads, droots = analysis
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1045
1046
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1062
1063
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        results = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        # reverse sort to ensure we end with dump0
        for key in sorted(remotedata, reverse=True):
            results.append(remote.pushkey('obsolete', key, '',
                                          remotedata[key]))
        if not all(results):
            # at least one pushkey call reported a failure
            repo.ui.warn(_('failed to push some obsolete markers!\n'))
1081
1082
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # pick the status/error message pair matching the kind of change
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery can have set the value form invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
1103
1104
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(mark)
                                  for mark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # We pulled a specific subset, sync on this subset
            return self.heads
        # We pulled every thing possible, sync on everything common
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1174
1175
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction; stays None until transaction() is called
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1204
1205
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # local-to-local pull: make sure we can actually read the source
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1260
1261
# ordered list of discovery step names to run before a pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1268
1269
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # refuse to silently overwrite an already-registered step
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1284
1285
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        # look the step up at call time so extensions can wrap it
        pulldiscoverymapping[stepname](pullop)
1290
1291
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # caller already provided remote bookmark data
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1304
1305
1305
1306
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    common, fetch, rheads = discovery.findcommonincoming(pullop.repo,
                                                         pullop.remote,
                                                         heads=pullop.heads,
                                                         force=pullop.force)
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n not in nm:
                # truly unknown locally: keep it as a remote head
                filteredrheads.append(n)
            elif n not in scommon:
                # known locally (possibly hidden): treat as common
                common.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1343
1344
1344 def _pullbundle2(pullop):
1345 def _pullbundle2(pullop):
1345 """pull data using bundle2
1346 """pull data using bundle2
1346
1347
1347 For now, the only supported data are changegroup."""
1348 For now, the only supported data are changegroup."""
1348 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1349 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1349
1350
1350 # At the moment we don't do stream clones over bundle2. If that is
1351 # At the moment we don't do stream clones over bundle2. If that is
1351 # implemented then here's where the check for that will go.
1352 # implemented then here's where the check for that will go.
1352 streaming = False
1353 streaming = False
1353
1354
1354 # pulling changegroup
1355 # pulling changegroup
1355 pullop.stepsdone.add('changegroup')
1356 pullop.stepsdone.add('changegroup')
1356
1357
1357 kwargs['common'] = pullop.common
1358 kwargs['common'] = pullop.common
1358 kwargs['heads'] = pullop.heads or pullop.rheads
1359 kwargs['heads'] = pullop.heads or pullop.rheads
1359 kwargs['cg'] = pullop.fetch
1360 kwargs['cg'] = pullop.fetch
1360 if 'listkeys' in pullop.remotebundle2caps:
1361 if 'listkeys' in pullop.remotebundle2caps:
1361 kwargs['listkeys'] = ['phases']
1362 kwargs['listkeys'] = ['phases']
1362 if pullop.remotebookmarks is None:
1363 if pullop.remotebookmarks is None:
1363 # make sure to always includes bookmark data when migrating
1364 # make sure to always includes bookmark data when migrating
1364 # `hg incoming --bundle` to using this function.
1365 # `hg incoming --bundle` to using this function.
1365 kwargs['listkeys'].append('bookmarks')
1366 kwargs['listkeys'].append('bookmarks')
1366
1367
1367 # If this is a full pull / clone and the server supports the clone bundles
1368 # If this is a full pull / clone and the server supports the clone bundles
1368 # feature, tell the server whether we attempted a clone bundle. The
1369 # feature, tell the server whether we attempted a clone bundle. The
1369 # presence of this flag indicates the client supports clone bundles. This
1370 # presence of this flag indicates the client supports clone bundles. This
1370 # will enable the server to treat clients that support clone bundles
1371 # will enable the server to treat clients that support clone bundles
1371 # differently from those that don't.
1372 # differently from those that don't.
1372 if (pullop.remote.capable('clonebundles')
1373 if (pullop.remote.capable('clonebundles')
1373 and pullop.heads is None and list(pullop.common) == [nullid]):
1374 and pullop.heads is None and list(pullop.common) == [nullid]):
1374 kwargs['cbattempted'] = pullop.clonebundleattempted
1375 kwargs['cbattempted'] = pullop.clonebundleattempted
1375
1376
1376 if streaming:
1377 if streaming:
1377 pullop.repo.ui.status(_('streaming all changes\n'))
1378 pullop.repo.ui.status(_('streaming all changes\n'))
1378 elif not pullop.fetch:
1379 elif not pullop.fetch:
1379 pullop.repo.ui.status(_("no changes found\n"))
1380 pullop.repo.ui.status(_("no changes found\n"))
1380 pullop.cgresult = 0
1381 pullop.cgresult = 0
1381 else:
1382 else:
1382 if pullop.heads is None and list(pullop.common) == [nullid]:
1383 if pullop.heads is None and list(pullop.common) == [nullid]:
1383 pullop.repo.ui.status(_("requesting all changes\n"))
1384 pullop.repo.ui.status(_("requesting all changes\n"))
1384 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1385 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1385 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1386 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1386 if obsolete.commonversion(remoteversions) is not None:
1387 if obsolete.commonversion(remoteversions) is not None:
1387 kwargs['obsmarkers'] = True
1388 kwargs['obsmarkers'] = True
1388 pullop.stepsdone.add('obsmarkers')
1389 pullop.stepsdone.add('obsmarkers')
1389 _pullbundle2extraprepare(pullop, kwargs)
1390 _pullbundle2extraprepare(pullop, kwargs)
1390 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1391 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1391 try:
1392 try:
1392 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1393 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1393 except bundle2.AbortFromPart as exc:
1394 except bundle2.AbortFromPart as exc:
1394 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1395 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1395 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1396 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1396 except error.BundleValueError as exc:
1397 except error.BundleValueError as exc:
1397 raise error.Abort(_('missing support for %s') % exc)
1398 raise error.Abort(_('missing support for %s') % exc)
1398
1399
1399 if pullop.fetch:
1400 if pullop.fetch:
1400 pullop.cgresult = bundle2.combinechangegroupresults(op)
1401 pullop.cgresult = bundle2.combinechangegroupresults(op)
1401
1402
1402 # processing phases change
1403 # processing phases change
1403 for namespace, value in op.records['listkeys']:
1404 for namespace, value in op.records['listkeys']:
1404 if namespace == 'phases':
1405 if namespace == 'phases':
1405 _pullapplyphases(pullop, value)
1406 _pullapplyphases(pullop, value)
1406
1407
1407 # processing bookmark update
1408 # processing bookmark update
1408 for namespace, value in op.records['listkeys']:
1409 for namespace, value in op.records['listkeys']:
1409 if namespace == 'bookmarks':
1410 if namespace == 'bookmarks':
1410 pullop.remotebookmarks = value
1411 pullop.remotebookmarks = value
1411
1412
1412 # bookmark data were either already there or pulled in the bundle
1413 # bookmark data were either already there or pulled in the bundle
1413 if pullop.remotebookmarks is not None:
1414 if pullop.remotebookmarks is not None:
1414 _pullbookmarks(pullop)
1415 _pullbookmarks(pullop)
1415
1416
1416 def _pullbundle2extraprepare(pullop, kwargs):
1417 def _pullbundle2extraprepare(pullop, kwargs):
1417 """hook function so that extensions can extend the getbundle call"""
1418 """hook function so that extensions can extend the getbundle call"""
1418 pass
1419 pass
1419
1420
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # Opening the transaction is postponed as much as possible: opening one
    # for nothing would break future useful rollback calls.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    repo = pullop.repo
    remote = pullop.remote
    if not pullop.fetch:
        repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # Pick the richest retrieval method the remote supports.
    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(repo, cg, tr, 'pull', remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1454
1455
def _pullphase(pullop):
    """fetch phase data from the remote and apply it locally"""
    if 'phases' in pullop.stepsdone:
        return
    # ask the remote for its phase information through pushkey
    phasedata = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, phasedata)
1461
1462
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        publicheads, _discarded = phases.analyzeremotephases(pullop.repo,
                                                             pullop.pulledsubset,
                                                             remotephases)
        draftheads = pullop.pulledsubset
    else:
        # Remote is old or publishing: every common changeset should be
        # seen as public
        publicheads = pullop.pulledsubset
        draftheads = []
    unfi = pullop.repo.unfiltered()
    getphase = unfi._phasecache.phase
    getrev = unfi.changelog.nodemap.get

    # exclude changesets already public locally and update the others
    publicheads = [node for node in publicheads
                   if getphase(unfi, getrev(node)) > phases.public]
    if publicheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, phases.public, publicheads)

    # exclude changesets already draft locally and update the others
    draftheads = [node for node in draftheads
                  if getphase(unfi, getrev(node)) > phases.draft]
    if draftheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, phases.draft, draftheads)
1496
1497
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    # decode the hex-encoded wire form into binary nodes before applying
    binbookmarks = bookmod.unhexlifybookmarks(pullop.remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, binbookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1509
1510
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    repo = pullop.repo
    if obsolete.isenabled(repo, obsolete.exchangeopt):
        repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is the first chunk; its presence means the remote has data
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if not key.startswith('dump'):
                    continue
                data = util.b85decode(remoteobs[key])
                version, newmarks = obsolete._readmarkers(data)
                markers.extend(newmarks)
            if markers:
                repo.obsstore.add(tr, markers)
                repo.invalidatevolatilesets()
    return tr
1537
1538
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    # advertise bundle2 support plus the url-quoted capability blob
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
1544
1545
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1552
1553
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and inserted in the
    list of steps (at the end when *idx* is None). Beware that decorated
    functions will be added in order (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        # inserting at len() is equivalent to appending
        position = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(position, stepname)
        return func
    return register
1571
1572
def bundle2requested(bundlecaps):
    """Tell whether the given capabilities request a bundle2 stream.

    Returns True when any advertised capability starts with 'HG2',
    False otherwise (including when no capabilities were passed).
    """
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1576
1577
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    if not bundle2requested(bundlecaps):
        # bundle10 case: only a raw changegroup can be produced
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case: decode the bundle2 capabilities the client advertised
    b2caps = {}
    prefix = 'bundle2='
    for bcaps in bundlecaps:
        if bcaps.startswith(prefix):
            blob = urlreq.unquote(bcaps[len(prefix):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()
1617
1618
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # keep only versions both sides understand; the highest wins
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    # NOTE: 'cgversions' and 'outgoing' are only bound when the branch above
    # ran; that is safe because 'cg' can only be truthy in that same case.
    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1645
1646
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1656
1657
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the requested heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
1668
1669
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless changesets are being exchanged...
    if not kwargs.get('cg', True):
        return
    # ...and the client supports it.
    if 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1688
1689
def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """
    return {name: node for name, node in bookmod.listbinbookmarks(repo)}
1699
1700
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = hashlib.sha1(''.join(sorted(current))).digest()
    # the client may send the literal heads, a hash of them, or 'force'
    unchanged = (their_heads == ['force']
                 or their_heads == current
                 or their_heads == ['hashed', digest])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1713
1714
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised.

    :repo: the local repository receiving the bundle
    :cg: the incoming bundle (bundle1 changegroup or bundle2.unbundle20)
    :heads: remote view of our heads, checked against the current ones
    :source: operation source identifier (used for hooks and transaction name)
    :url: url of the remote, used for hooks and output-capture decisions
    """
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                      False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            # pre-initialize so the except clause below can safely test 'r'
            # even when processing fails before any reply bundle exists
            r = None
            try:
                def gettransaction():
                    # lazily take the locks and open the transaction on
                    # first use; state is kept in 'lockandtr' so the outer
                    # finally can release whatever was actually acquired
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage any output parts already produced so the
                    # client still sees them despite the failure
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1787
1788
1788 def _maybeapplyclonebundle(pullop):
1789 def _maybeapplyclonebundle(pullop):
1789 """Apply a clone bundle from a remote, if possible."""
1790 """Apply a clone bundle from a remote, if possible."""
1790
1791
1791 repo = pullop.repo
1792 repo = pullop.repo
1792 remote = pullop.remote
1793 remote = pullop.remote
1793
1794
1794 if not repo.ui.configbool('ui', 'clonebundles', True):
1795 if not repo.ui.configbool('ui', 'clonebundles', True):
1795 return
1796 return
1796
1797
1797 # Only run if local repo is empty.
1798 # Only run if local repo is empty.
1798 if len(repo):
1799 if len(repo):
1799 return
1800 return
1800
1801
1801 if pullop.heads:
1802 if pullop.heads:
1802 return
1803 return
1803
1804
1804 if not remote.capable('clonebundles'):
1805 if not remote.capable('clonebundles'):
1805 return
1806 return
1806
1807
1807 res = remote._call('clonebundles')
1808 res = remote._call('clonebundles')
1808
1809
1809 # If we call the wire protocol command, that's good enough to record the
1810 # If we call the wire protocol command, that's good enough to record the
1810 # attempt.
1811 # attempt.
1811 pullop.clonebundleattempted = True
1812 pullop.clonebundleattempted = True
1812
1813
1813 entries = parseclonebundlesmanifest(repo, res)
1814 entries = parseclonebundlesmanifest(repo, res)
1814 if not entries:
1815 if not entries:
1815 repo.ui.note(_('no clone bundles available on remote; '
1816 repo.ui.note(_('no clone bundles available on remote; '
1816 'falling back to regular clone\n'))
1817 'falling back to regular clone\n'))
1817 return
1818 return
1818
1819
1819 entries = filterclonebundleentries(repo, entries)
1820 entries = filterclonebundleentries(repo, entries)
1820 if not entries:
1821 if not entries:
1821 # There is a thundering herd concern here. However, if a server
1822 # There is a thundering herd concern here. However, if a server
1822 # operator doesn't advertise bundles appropriate for its clients,
1823 # operator doesn't advertise bundles appropriate for its clients,
1823 # they deserve what's coming. Furthermore, from a client's
1824 # they deserve what's coming. Furthermore, from a client's
1824 # perspective, no automatic fallback would mean not being able to
1825 # perspective, no automatic fallback would mean not being able to
1825 # clone!
1826 # clone!
1826 repo.ui.warn(_('no compatible clone bundles available on server; '
1827 repo.ui.warn(_('no compatible clone bundles available on server; '
1827 'falling back to regular clone\n'))
1828 'falling back to regular clone\n'))
1828 repo.ui.warn(_('(you may want to report this to the server '
1829 repo.ui.warn(_('(you may want to report this to the server '
1829 'operator)\n'))
1830 'operator)\n'))
1830 return
1831 return
1831
1832
1832 entries = sortclonebundleentries(repo.ui, entries)
1833 entries = sortclonebundleentries(repo.ui, entries)
1833
1834
1834 url = entries[0]['URL']
1835 url = entries[0]['URL']
1835 repo.ui.status(_('applying clone bundle from %s\n') % url)
1836 repo.ui.status(_('applying clone bundle from %s\n') % url)
1836 if trypullbundlefromurl(repo.ui, repo, url):
1837 if trypullbundlefromurl(repo.ui, repo, url):
1837 repo.ui.status(_('finished applying clone bundle\n'))
1838 repo.ui.status(_('finished applying clone bundle\n'))
1838 # Bundle failed.
1839 # Bundle failed.
1839 #
1840 #
1840 # We abort by default to avoid the thundering herd of
1841 # We abort by default to avoid the thundering herd of
1841 # clients flooding a server that was expecting expensive
1842 # clients flooding a server that was expecting expensive
1842 # clone load to be offloaded.
1843 # clone load to be offloaded.
1843 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1844 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1844 repo.ui.warn(_('falling back to normal clone\n'))
1845 repo.ui.warn(_('falling back to normal clone\n'))
1845 else:
1846 else:
1846 raise error.Abort(_('error applying bundle'),
1847 raise error.Abort(_('error applying bundle'),
1847 hint=_('if this error persists, consider contacting '
1848 hint=_('if this error persists, consider contacting '
1848 'the server operator or disable clone '
1849 'the server operator or disable clone '
1849 'bundles via '
1850 'bundles via '
1850 '"--config ui.clonebundles=false"'))
1851 '"--config ui.clonebundles=false"'))
1851
1852
1852 def parseclonebundlesmanifest(repo, s):
1853 def parseclonebundlesmanifest(repo, s):
1853 """Parses the raw text of a clone bundles manifest.
1854 """Parses the raw text of a clone bundles manifest.
1854
1855
1855 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1856 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1856 to the URL and other keys are the attributes for the entry.
1857 to the URL and other keys are the attributes for the entry.
1857 """
1858 """
1858 m = []
1859 m = []
1859 for line in s.splitlines():
1860 for line in s.splitlines():
1860 fields = line.split()
1861 fields = line.split()
1861 if not fields:
1862 if not fields:
1862 continue
1863 continue
1863 attrs = {'URL': fields[0]}
1864 attrs = {'URL': fields[0]}
1864 for rawattr in fields[1:]:
1865 for rawattr in fields[1:]:
1865 key, value = rawattr.split('=', 1)
1866 key, value = rawattr.split('=', 1)
1866 key = urlreq.unquote(key)
1867 key = urlreq.unquote(key)
1867 value = urlreq.unquote(value)
1868 value = urlreq.unquote(value)
1868 attrs[key] = value
1869 attrs[key] = value
1869
1870
1870 # Parse BUNDLESPEC into components. This makes client-side
1871 # Parse BUNDLESPEC into components. This makes client-side
1871 # preferences easier to specify since you can prefer a single
1872 # preferences easier to specify since you can prefer a single
1872 # component of the BUNDLESPEC.
1873 # component of the BUNDLESPEC.
1873 if key == 'BUNDLESPEC':
1874 if key == 'BUNDLESPEC':
1874 try:
1875 try:
1875 comp, version, params = parsebundlespec(repo, value,
1876 comp, version, params = parsebundlespec(repo, value,
1876 externalnames=True)
1877 externalnames=True)
1877 attrs['COMPRESSION'] = comp
1878 attrs['COMPRESSION'] = comp
1878 attrs['VERSION'] = version
1879 attrs['VERSION'] = version
1879 except error.InvalidBundleSpecification:
1880 except error.InvalidBundleSpecification:
1880 pass
1881 pass
1881 except error.UnsupportedBundleSpecification:
1882 except error.UnsupportedBundleSpecification:
1882 pass
1883 pass
1883
1884
1884 m.append(attrs)
1885 m.append(attrs)
1885
1886
1886 return m
1887 return m
1887
1888
1888 def filterclonebundleentries(repo, entries):
1889 def filterclonebundleentries(repo, entries):
1889 """Remove incompatible clone bundle manifest entries.
1890 """Remove incompatible clone bundle manifest entries.
1890
1891
1891 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1892 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1892 and returns a new list consisting of only the entries that this client
1893 and returns a new list consisting of only the entries that this client
1893 should be able to apply.
1894 should be able to apply.
1894
1895
1895 There is no guarantee we'll be able to apply all returned entries because
1896 There is no guarantee we'll be able to apply all returned entries because
1896 the metadata we use to filter on may be missing or wrong.
1897 the metadata we use to filter on may be missing or wrong.
1897 """
1898 """
1898 newentries = []
1899 newentries = []
1899 for entry in entries:
1900 for entry in entries:
1900 spec = entry.get('BUNDLESPEC')
1901 spec = entry.get('BUNDLESPEC')
1901 if spec:
1902 if spec:
1902 try:
1903 try:
1903 parsebundlespec(repo, spec, strict=True)
1904 parsebundlespec(repo, spec, strict=True)
1904 except error.InvalidBundleSpecification as e:
1905 except error.InvalidBundleSpecification as e:
1905 repo.ui.debug(str(e) + '\n')
1906 repo.ui.debug(str(e) + '\n')
1906 continue
1907 continue
1907 except error.UnsupportedBundleSpecification as e:
1908 except error.UnsupportedBundleSpecification as e:
1908 repo.ui.debug('filtering %s because unsupported bundle '
1909 repo.ui.debug('filtering %s because unsupported bundle '
1909 'spec: %s\n' % (entry['URL'], str(e)))
1910 'spec: %s\n' % (entry['URL'], str(e)))
1910 continue
1911 continue
1911
1912
1912 if 'REQUIRESNI' in entry and not sslutil.hassni:
1913 if 'REQUIRESNI' in entry and not sslutil.hassni:
1913 repo.ui.debug('filtering %s because SNI not supported\n' %
1914 repo.ui.debug('filtering %s because SNI not supported\n' %
1914 entry['URL'])
1915 entry['URL'])
1915 continue
1916 continue
1916
1917
1917 newentries.append(entry)
1918 newentries.append(entry)
1918
1919
1919 return newentries
1920 return newentries
1920
1921
1921 class clonebundleentry(object):
1922 class clonebundleentry(object):
1922 """Represents an item in a clone bundles manifest.
1923 """Represents an item in a clone bundles manifest.
1923
1924
1924 This rich class is needed to support sorting since sorted() in Python 3
1925 This rich class is needed to support sorting since sorted() in Python 3
1925 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1926 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1926 won't work.
1927 won't work.
1927 """
1928 """
1928
1929
1929 def __init__(self, value, prefers):
1930 def __init__(self, value, prefers):
1930 self.value = value
1931 self.value = value
1931 self.prefers = prefers
1932 self.prefers = prefers
1932
1933
1933 def _cmp(self, other):
1934 def _cmp(self, other):
1934 for prefkey, prefvalue in self.prefers:
1935 for prefkey, prefvalue in self.prefers:
1935 avalue = self.value.get(prefkey)
1936 avalue = self.value.get(prefkey)
1936 bvalue = other.value.get(prefkey)
1937 bvalue = other.value.get(prefkey)
1937
1938
1938 # Special case for b missing attribute and a matches exactly.
1939 # Special case for b missing attribute and a matches exactly.
1939 if avalue is not None and bvalue is None and avalue == prefvalue:
1940 if avalue is not None and bvalue is None and avalue == prefvalue:
1940 return -1
1941 return -1
1941
1942
1942 # Special case for a missing attribute and b matches exactly.
1943 # Special case for a missing attribute and b matches exactly.
1943 if bvalue is not None and avalue is None and bvalue == prefvalue:
1944 if bvalue is not None and avalue is None and bvalue == prefvalue:
1944 return 1
1945 return 1
1945
1946
1946 # We can't compare unless attribute present on both.
1947 # We can't compare unless attribute present on both.
1947 if avalue is None or bvalue is None:
1948 if avalue is None or bvalue is None:
1948 continue
1949 continue
1949
1950
1950 # Same values should fall back to next attribute.
1951 # Same values should fall back to next attribute.
1951 if avalue == bvalue:
1952 if avalue == bvalue:
1952 continue
1953 continue
1953
1954
1954 # Exact matches come first.
1955 # Exact matches come first.
1955 if avalue == prefvalue:
1956 if avalue == prefvalue:
1956 return -1
1957 return -1
1957 if bvalue == prefvalue:
1958 if bvalue == prefvalue:
1958 return 1
1959 return 1
1959
1960
1960 # Fall back to next attribute.
1961 # Fall back to next attribute.
1961 continue
1962 continue
1962
1963
1963 # If we got here we couldn't sort by attributes and prefers. Fall
1964 # If we got here we couldn't sort by attributes and prefers. Fall
1964 # back to index order.
1965 # back to index order.
1965 return 0
1966 return 0
1966
1967
1967 def __lt__(self, other):
1968 def __lt__(self, other):
1968 return self._cmp(other) < 0
1969 return self._cmp(other) < 0
1969
1970
1970 def __gt__(self, other):
1971 def __gt__(self, other):
1971 return self._cmp(other) > 0
1972 return self._cmp(other) > 0
1972
1973
1973 def __eq__(self, other):
1974 def __eq__(self, other):
1974 return self._cmp(other) == 0
1975 return self._cmp(other) == 0
1975
1976
1976 def __le__(self, other):
1977 def __le__(self, other):
1977 return self._cmp(other) <= 0
1978 return self._cmp(other) <= 0
1978
1979
1979 def __ge__(self, other):
1980 def __ge__(self, other):
1980 return self._cmp(other) >= 0
1981 return self._cmp(other) >= 0
1981
1982
1982 def __ne__(self, other):
1983 def __ne__(self, other):
1983 return self._cmp(other) != 0
1984 return self._cmp(other) != 0
1984
1985
1985 def sortclonebundleentries(ui, entries):
1986 def sortclonebundleentries(ui, entries):
1986 prefers = ui.configlist('ui', 'clonebundleprefers')
1987 prefers = ui.configlist('ui', 'clonebundleprefers')
1987 if not prefers:
1988 if not prefers:
1988 return list(entries)
1989 return list(entries)
1989
1990
1990 prefers = [p.split('=', 1) for p in prefers]
1991 prefers = [p.split('=', 1) for p in prefers]
1991
1992
1992 items = sorted(clonebundleentry(v, prefers) for v in entries)
1993 items = sorted(clonebundleentry(v, prefers) for v in entries)
1993 return [i.value for i in items]
1994 return [i.value for i in items]
1994
1995
1995 def trypullbundlefromurl(ui, repo, url):
1996 def trypullbundlefromurl(ui, repo, url):
1996 """Attempt to apply a bundle from a URL."""
1997 """Attempt to apply a bundle from a URL."""
1997 with repo.lock(), repo.transaction('bundleurl') as tr:
1998 with repo.lock(), repo.transaction('bundleurl') as tr:
1998 try:
1999 try:
1999 fh = urlmod.open(ui, url)
2000 fh = urlmod.open(ui, url)
2000 cg = readbundle(ui, fh, 'stream')
2001 cg = readbundle(ui, fh, 'stream')
2001
2002
2002 if isinstance(cg, streamclone.streamcloneapplier):
2003 if isinstance(cg, streamclone.streamcloneapplier):
2003 cg.apply(repo)
2004 cg.apply(repo)
2004 else:
2005 else:
2005 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2006 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2006 return True
2007 return True
2007 except urlerr.httperror as e:
2008 except urlerr.httperror as e:
2008 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2009 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2009 except urlerr.urlerror as e:
2010 except urlerr.urlerror as e:
2010 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2011 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2011
2012
2012 return False
2013 return False
@@ -1,299 +1,318 b''
1 ==================================
1 ==================================
2 Basic testing for the push command
2 Basic testing for the push command
3 ==================================
3 ==================================
4
4
5 Testing of the '--rev' flag
5 Testing of the '--rev' flag
6 ===========================
6 ===========================
7
7
8 $ hg init test-revflag
8 $ hg init test-revflag
9 $ hg -R test-revflag unbundle "$TESTDIR/bundles/remote.hg"
9 $ hg -R test-revflag unbundle "$TESTDIR/bundles/remote.hg"
10 adding changesets
10 adding changesets
11 adding manifests
11 adding manifests
12 adding file changes
12 adding file changes
13 added 9 changesets with 7 changes to 4 files (+1 heads)
13 added 9 changesets with 7 changes to 4 files (+1 heads)
14 (run 'hg heads' to see heads, 'hg merge' to merge)
14 (run 'hg heads' to see heads, 'hg merge' to merge)
15
15
16 $ for i in 0 1 2 3 4 5 6 7 8; do
16 $ for i in 0 1 2 3 4 5 6 7 8; do
17 > echo
17 > echo
18 > hg init test-revflag-"$i"
18 > hg init test-revflag-"$i"
19 > hg -R test-revflag push -r "$i" test-revflag-"$i"
19 > hg -R test-revflag push -r "$i" test-revflag-"$i"
20 > hg -R test-revflag-"$i" verify
20 > hg -R test-revflag-"$i" verify
21 > done
21 > done
22
22
23 pushing to test-revflag-0
23 pushing to test-revflag-0
24 searching for changes
24 searching for changes
25 adding changesets
25 adding changesets
26 adding manifests
26 adding manifests
27 adding file changes
27 adding file changes
28 added 1 changesets with 1 changes to 1 files
28 added 1 changesets with 1 changes to 1 files
29 checking changesets
29 checking changesets
30 checking manifests
30 checking manifests
31 crosschecking files in changesets and manifests
31 crosschecking files in changesets and manifests
32 checking files
32 checking files
33 1 files, 1 changesets, 1 total revisions
33 1 files, 1 changesets, 1 total revisions
34
34
35 pushing to test-revflag-1
35 pushing to test-revflag-1
36 searching for changes
36 searching for changes
37 adding changesets
37 adding changesets
38 adding manifests
38 adding manifests
39 adding file changes
39 adding file changes
40 added 2 changesets with 2 changes to 1 files
40 added 2 changesets with 2 changes to 1 files
41 checking changesets
41 checking changesets
42 checking manifests
42 checking manifests
43 crosschecking files in changesets and manifests
43 crosschecking files in changesets and manifests
44 checking files
44 checking files
45 1 files, 2 changesets, 2 total revisions
45 1 files, 2 changesets, 2 total revisions
46
46
47 pushing to test-revflag-2
47 pushing to test-revflag-2
48 searching for changes
48 searching for changes
49 adding changesets
49 adding changesets
50 adding manifests
50 adding manifests
51 adding file changes
51 adding file changes
52 added 3 changesets with 3 changes to 1 files
52 added 3 changesets with 3 changes to 1 files
53 checking changesets
53 checking changesets
54 checking manifests
54 checking manifests
55 crosschecking files in changesets and manifests
55 crosschecking files in changesets and manifests
56 checking files
56 checking files
57 1 files, 3 changesets, 3 total revisions
57 1 files, 3 changesets, 3 total revisions
58
58
59 pushing to test-revflag-3
59 pushing to test-revflag-3
60 searching for changes
60 searching for changes
61 adding changesets
61 adding changesets
62 adding manifests
62 adding manifests
63 adding file changes
63 adding file changes
64 added 4 changesets with 4 changes to 1 files
64 added 4 changesets with 4 changes to 1 files
65 checking changesets
65 checking changesets
66 checking manifests
66 checking manifests
67 crosschecking files in changesets and manifests
67 crosschecking files in changesets and manifests
68 checking files
68 checking files
69 1 files, 4 changesets, 4 total revisions
69 1 files, 4 changesets, 4 total revisions
70
70
71 pushing to test-revflag-4
71 pushing to test-revflag-4
72 searching for changes
72 searching for changes
73 adding changesets
73 adding changesets
74 adding manifests
74 adding manifests
75 adding file changes
75 adding file changes
76 added 2 changesets with 2 changes to 1 files
76 added 2 changesets with 2 changes to 1 files
77 checking changesets
77 checking changesets
78 checking manifests
78 checking manifests
79 crosschecking files in changesets and manifests
79 crosschecking files in changesets and manifests
80 checking files
80 checking files
81 1 files, 2 changesets, 2 total revisions
81 1 files, 2 changesets, 2 total revisions
82
82
83 pushing to test-revflag-5
83 pushing to test-revflag-5
84 searching for changes
84 searching for changes
85 adding changesets
85 adding changesets
86 adding manifests
86 adding manifests
87 adding file changes
87 adding file changes
88 added 3 changesets with 3 changes to 1 files
88 added 3 changesets with 3 changes to 1 files
89 checking changesets
89 checking changesets
90 checking manifests
90 checking manifests
91 crosschecking files in changesets and manifests
91 crosschecking files in changesets and manifests
92 checking files
92 checking files
93 1 files, 3 changesets, 3 total revisions
93 1 files, 3 changesets, 3 total revisions
94
94
95 pushing to test-revflag-6
95 pushing to test-revflag-6
96 searching for changes
96 searching for changes
97 adding changesets
97 adding changesets
98 adding manifests
98 adding manifests
99 adding file changes
99 adding file changes
100 added 4 changesets with 5 changes to 2 files
100 added 4 changesets with 5 changes to 2 files
101 checking changesets
101 checking changesets
102 checking manifests
102 checking manifests
103 crosschecking files in changesets and manifests
103 crosschecking files in changesets and manifests
104 checking files
104 checking files
105 2 files, 4 changesets, 5 total revisions
105 2 files, 4 changesets, 5 total revisions
106
106
107 pushing to test-revflag-7
107 pushing to test-revflag-7
108 searching for changes
108 searching for changes
109 adding changesets
109 adding changesets
110 adding manifests
110 adding manifests
111 adding file changes
111 adding file changes
112 added 5 changesets with 6 changes to 3 files
112 added 5 changesets with 6 changes to 3 files
113 checking changesets
113 checking changesets
114 checking manifests
114 checking manifests
115 crosschecking files in changesets and manifests
115 crosschecking files in changesets and manifests
116 checking files
116 checking files
117 3 files, 5 changesets, 6 total revisions
117 3 files, 5 changesets, 6 total revisions
118
118
119 pushing to test-revflag-8
119 pushing to test-revflag-8
120 searching for changes
120 searching for changes
121 adding changesets
121 adding changesets
122 adding manifests
122 adding manifests
123 adding file changes
123 adding file changes
124 added 5 changesets with 5 changes to 2 files
124 added 5 changesets with 5 changes to 2 files
125 checking changesets
125 checking changesets
126 checking manifests
126 checking manifests
127 crosschecking files in changesets and manifests
127 crosschecking files in changesets and manifests
128 checking files
128 checking files
129 2 files, 5 changesets, 5 total revisions
129 2 files, 5 changesets, 5 total revisions
130
130
131 $ cd test-revflag-8
131 $ cd test-revflag-8
132
132
133 $ hg pull ../test-revflag-7
133 $ hg pull ../test-revflag-7
134 pulling from ../test-revflag-7
134 pulling from ../test-revflag-7
135 searching for changes
135 searching for changes
136 adding changesets
136 adding changesets
137 adding manifests
137 adding manifests
138 adding file changes
138 adding file changes
139 added 4 changesets with 2 changes to 3 files (+1 heads)
139 added 4 changesets with 2 changes to 3 files (+1 heads)
140 (run 'hg heads' to see heads, 'hg merge' to merge)
140 (run 'hg heads' to see heads, 'hg merge' to merge)
141
141
142 $ hg verify
142 $ hg verify
143 checking changesets
143 checking changesets
144 checking manifests
144 checking manifests
145 crosschecking files in changesets and manifests
145 crosschecking files in changesets and manifests
146 checking files
146 checking files
147 4 files, 9 changesets, 7 total revisions
147 4 files, 9 changesets, 7 total revisions
148
148
149 $ cd ..
149 $ cd ..
150
150
151 Test server side validation during push
151 Test server side validation during push
152 =======================================
152 =======================================
153
153
154 $ hg init test-validation
154 $ hg init test-validation
155 $ cd test-validation
155 $ cd test-validation
156
156
157 $ cat > .hg/hgrc <<EOF
157 $ cat > .hg/hgrc <<EOF
158 > [server]
158 > [server]
159 > validate=1
159 > validate=1
160 > EOF
160 > EOF
161
161
162 $ echo alpha > alpha
162 $ echo alpha > alpha
163 $ echo beta > beta
163 $ echo beta > beta
164 $ hg addr
164 $ hg addr
165 adding alpha
165 adding alpha
166 adding beta
166 adding beta
167 $ hg ci -m 1
167 $ hg ci -m 1
168
168
169 $ cd ..
169 $ cd ..
170 $ hg clone test-validation test-validation-clone
170 $ hg clone test-validation test-validation-clone
171 updating to branch default
171 updating to branch default
172 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
172 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
173
173
174 Test spurious filelog entries:
174 Test spurious filelog entries:
175
175
176 $ cd test-validation-clone
176 $ cd test-validation-clone
177 $ echo blah >> beta
177 $ echo blah >> beta
178 $ cp .hg/store/data/beta.i tmp1
178 $ cp .hg/store/data/beta.i tmp1
179 $ hg ci -m 2
179 $ hg ci -m 2
180 $ cp .hg/store/data/beta.i tmp2
180 $ cp .hg/store/data/beta.i tmp2
181 $ hg -q rollback
181 $ hg -q rollback
182 $ mv tmp2 .hg/store/data/beta.i
182 $ mv tmp2 .hg/store/data/beta.i
183 $ echo blah >> beta
183 $ echo blah >> beta
184 $ hg ci -m '2 (corrupt)'
184 $ hg ci -m '2 (corrupt)'
185
185
186 Expected to fail:
186 Expected to fail:
187
187
188 $ hg verify
188 $ hg verify
189 checking changesets
189 checking changesets
190 checking manifests
190 checking manifests
191 crosschecking files in changesets and manifests
191 crosschecking files in changesets and manifests
192 checking files
192 checking files
193 beta@1: dddc47b3ba30 not in manifests
193 beta@1: dddc47b3ba30 not in manifests
194 2 files, 2 changesets, 4 total revisions
194 2 files, 2 changesets, 4 total revisions
195 1 integrity errors encountered!
195 1 integrity errors encountered!
196 (first damaged changeset appears to be 1)
196 (first damaged changeset appears to be 1)
197 [1]
197 [1]
198
198
199 $ hg push
199 $ hg push
200 pushing to $TESTTMP/test-validation (glob)
200 pushing to $TESTTMP/test-validation (glob)
201 searching for changes
201 searching for changes
202 adding changesets
202 adding changesets
203 adding manifests
203 adding manifests
204 adding file changes
204 adding file changes
205 transaction abort!
205 transaction abort!
206 rollback completed
206 rollback completed
207 abort: received spurious file revlog entry
207 abort: received spurious file revlog entry
208 [255]
208 [255]
209
209
210 $ hg -q rollback
210 $ hg -q rollback
211 $ mv tmp1 .hg/store/data/beta.i
211 $ mv tmp1 .hg/store/data/beta.i
212 $ echo beta > beta
212 $ echo beta > beta
213
213
214 Test missing filelog entries:
214 Test missing filelog entries:
215
215
216 $ cp .hg/store/data/beta.i tmp
216 $ cp .hg/store/data/beta.i tmp
217 $ echo blah >> beta
217 $ echo blah >> beta
218 $ hg ci -m '2 (corrupt)'
218 $ hg ci -m '2 (corrupt)'
219 $ mv tmp .hg/store/data/beta.i
219 $ mv tmp .hg/store/data/beta.i
220
220
221 Expected to fail:
221 Expected to fail:
222
222
223 $ hg verify
223 $ hg verify
224 checking changesets
224 checking changesets
225 checking manifests
225 checking manifests
226 crosschecking files in changesets and manifests
226 crosschecking files in changesets and manifests
227 checking files
227 checking files
228 beta@1: manifest refers to unknown revision dddc47b3ba30
228 beta@1: manifest refers to unknown revision dddc47b3ba30
229 2 files, 2 changesets, 2 total revisions
229 2 files, 2 changesets, 2 total revisions
230 1 integrity errors encountered!
230 1 integrity errors encountered!
231 (first damaged changeset appears to be 1)
231 (first damaged changeset appears to be 1)
232 [1]
232 [1]
233
233
234 $ hg push
234 $ hg push
235 pushing to $TESTTMP/test-validation (glob)
235 pushing to $TESTTMP/test-validation (glob)
236 searching for changes
236 searching for changes
237 adding changesets
237 adding changesets
238 adding manifests
238 adding manifests
239 adding file changes
239 adding file changes
240 transaction abort!
240 transaction abort!
241 rollback completed
241 rollback completed
242 abort: missing file data for beta:dddc47b3ba30e54484720ce0f4f768a0f4b6efb9 - run hg verify
242 abort: missing file data for beta:dddc47b3ba30e54484720ce0f4f768a0f4b6efb9 - run hg verify
243 [255]
243 [255]
244
244
245 $ cd ..
245 $ cd ..
246
246
247 Test push hook locking
247 Test push hook locking
248 =====================
248 =====================
249
249
250 $ hg init 1
250 $ hg init 1
251
251
252 $ echo '[ui]' >> 1/.hg/hgrc
252 $ echo '[ui]' >> 1/.hg/hgrc
253 $ echo 'timeout = 10' >> 1/.hg/hgrc
253 $ echo 'timeout = 10' >> 1/.hg/hgrc
254
254
255 $ echo foo > 1/foo
255 $ echo foo > 1/foo
256 $ hg --cwd 1 ci -A -m foo
256 $ hg --cwd 1 ci -A -m foo
257 adding foo
257 adding foo
258
258
259 $ hg clone 1 2
259 $ hg clone 1 2
260 updating to branch default
260 updating to branch default
261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
262
262
263 $ hg clone 2 3
263 $ hg clone 2 3
264 updating to branch default
264 updating to branch default
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
266
266
267 $ cat <<EOF > $TESTTMP/debuglocks-pretxn-hook.sh
267 $ cat <<EOF > $TESTTMP/debuglocks-pretxn-hook.sh
268 > hg debuglocks
268 > hg debuglocks
269 > true
269 > true
270 > EOF
270 > EOF
271 $ echo '[hooks]' >> 2/.hg/hgrc
271 $ echo '[hooks]' >> 2/.hg/hgrc
272 $ echo "pretxnchangegroup.a = sh $TESTTMP/debuglocks-pretxn-hook.sh" >> 2/.hg/hgrc
272 $ echo "pretxnchangegroup.a = sh $TESTTMP/debuglocks-pretxn-hook.sh" >> 2/.hg/hgrc
273 $ echo 'changegroup.push = hg push -qf ../1' >> 2/.hg/hgrc
273 $ echo 'changegroup.push = hg push -qf ../1' >> 2/.hg/hgrc
274
274
275 $ echo bar >> 3/foo
275 $ echo bar >> 3/foo
276 $ hg --cwd 3 ci -m bar
276 $ hg --cwd 3 ci -m bar
277
277
278 $ hg --cwd 3 push ../2 --config devel.legacy.exchange=bundle1
278 $ hg --cwd 3 push ../2 --config devel.legacy.exchange=bundle1
279 pushing to ../2
279 pushing to ../2
280 searching for changes
280 searching for changes
281 adding changesets
281 adding changesets
282 adding manifests
282 adding manifests
283 adding file changes
283 adding file changes
284 added 1 changesets with 1 changes to 1 files
284 added 1 changesets with 1 changes to 1 files
285 lock: user *, process * (*s) (glob)
285 lock: user *, process * (*s) (glob)
286 wlock: free
286 wlock: free
287
287
288 $ hg --cwd 1 --config extensions.strip= strip tip -q
288 $ hg --cwd 1 --config extensions.strip= strip tip -q
289 $ hg --cwd 2 --config extensions.strip= strip tip -q
289 $ hg --cwd 2 --config extensions.strip= strip tip -q
290 $ hg --cwd 3 push ../2 # bundle2+
290 $ hg --cwd 3 push ../2 # bundle2+
291 pushing to ../2
291 pushing to ../2
292 searching for changes
292 searching for changes
293 adding changesets
293 adding changesets
294 adding manifests
294 adding manifests
295 adding file changes
295 adding file changes
296 added 1 changesets with 1 changes to 1 files
296 added 1 changesets with 1 changes to 1 files
297 lock: user *, process * (*s) (glob)
297 lock: user *, process * (*s) (glob)
298 wlock: user *, process * (*s) (glob)
298 wlock: user *, process * (*s) (glob)
299
299
300 Test bare push with multiple race checking options
301 --------------------------------------------------
302
303 $ hg init test-bare-push-no-concurrency
304 $ hg init test-bare-push-unrelated-concurrency
305 $ hg -R test-revflag push -r 0 test-bare-push-no-concurrency --config server.concurrent-push-mode=strict
306 pushing to test-bare-push-no-concurrency
307 searching for changes
308 adding changesets
309 adding manifests
310 adding file changes
311 added 1 changesets with 1 changes to 1 files
312 $ hg -R test-revflag push -r 0 test-bare-push-unrelated-concurrency --config server.concurrent-push-mode=check-related
313 pushing to test-bare-push-unrelated-concurrency
314 searching for changes
315 adding changesets
316 adding manifests
317 adding file changes
318 added 1 changesets with 1 changes to 1 files
General Comments 0
You need to be logged in to leave comments. Login now