getbundle: add support for 'bookmarks' boolean argument...
Boris Feld
r35268:cb4dcd7f default
@@ -1,2175 +1,2188 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    obsolete,
    phases,
    pushkey,
    pycompat,
    remotenames,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
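
# Illustrative sketch (assumes the stock gzip/bzip2/none compression engines
# and their usual internal names): with the defaults above, parsing resolves
# the human-readable names to their internal equivalents, roughly:
#
#   parsebundlespec(repo, 'gzip-v2')          -> ('GZ', '02', {})
#   parsebundlespec(repo, 'v2', strict=False) -> ('BZ', '02', {})
#   parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#                                             -> ('UN', 's1',
#                                                 {'requirements': 'revlogv1'})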

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
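
# Illustrative usage (hypothetical file name): a caller passes an open binary
# file handle and gets back a spec string that parsebundlespec() above would
# accept, e.g. 'bzip2-v1' or 'gzip-v2' depending on how the bundle was made:
#
#   with open('changesets.hg', 'rb') as fh:
#       spec = getbundlespec(ui, fh)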

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return True if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    forcebundle1 = False
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # The value is a list of bundle versions to pick from; the highest
    # supported version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')
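
# Illustrative sketch: the developer config read above would typically be set
# from a test or an hgrc, e.g.
#
#   [devel]
#   legacy.exchange = bundle1
#
# which makes _forcebundle1() return True even against bundle2-capable peers.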

class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
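
# Illustrative sketch (simplified): the push() function below drives one of
# these objects through the whole push, roughly:
#
#   pushop = pushoperation(repo, remote, force=False, revs=None)
#   _pushdiscovery(pushop)   # fills outgoing, remoteheads, outbookmarks, ...
#   _pushbundle2(pushop)     # sets cgresult/bkresult from the server reply
#
# Callers then inspect pushop.cgresult and pushop.bkresult for the outcome.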


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop
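
# Illustrative usage (simplified): a caller interprets the returned operation
# along the lines of:
#
#   pushop = push(repo, remote, revs=revs)
#   if pushop.cgresult is None:
#       pass   # nothing to push
#   elif not pushop.cgresult:
#       pass   # push failed (e.g. HTTP error)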

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)
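
# Illustrative sketch (hypothetical step name): an extension can register an
# extra discovery step with the decorator above, e.g.
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.ui.debug('discovering mydata to push\n')
#
# and it will then run as part of _pushdiscovery() in registration order.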

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both the success and failure cases of the changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that this revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
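
# Illustrative note: each entry appended to pushop.outbookmarks above is a
# (bookmark, old remote node, new local node) triple of hex strings, where an
# empty string stands for "absent on that side" (a bookmark being added to or
# deleted from the remote).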

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80-char limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push and there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
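
# Illustrative sketch (hypothetical part and step names): an extension adding
# its own part to the outgoing bundle2 would register a generator much like
# the built-in ones below, e.g.
#
#   @b2partsgenerator('my-part')
#   def _pushb2mypart(pushop, bundler):
#       if 'my-part' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('my-part')
#       bundler.newpart('myext:my-part', data='payload')
#
# The optional ``idx`` argument controls where the step lands in
# b2partsgenorder.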

def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
780
780
781 @b2partsgenerator('changeset')
781 @b2partsgenerator('changeset')
782 def _pushb2ctx(pushop, bundler):
782 def _pushb2ctx(pushop, bundler):
783 """handle changegroup push through bundle2
783 """handle changegroup push through bundle2
784
784
785 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
785 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
786 """
786 """
787 if 'changesets' in pushop.stepsdone:
787 if 'changesets' in pushop.stepsdone:
788 return
788 return
789 pushop.stepsdone.add('changesets')
789 pushop.stepsdone.add('changesets')
790 # Send known heads to the server for race detection.
790 # Send known heads to the server for race detection.
791 if not _pushcheckoutgoing(pushop):
791 if not _pushcheckoutgoing(pushop):
792 return
792 return
793 pushop.repo.prepushoutgoinghooks(pushop)
793 pushop.repo.prepushoutgoinghooks(pushop)
794
794
795 _pushb2ctxcheckheads(pushop, bundler)
795 _pushb2ctxcheckheads(pushop, bundler)
796
796
797 b2caps = bundle2.bundle2caps(pushop.remote)
797 b2caps = bundle2.bundle2caps(pushop.remote)
798 version = '01'
798 version = '01'
799 cgversions = b2caps.get('changegroup')
799 cgversions = b2caps.get('changegroup')
800 if cgversions: # 3.1 and 3.2 ship with an empty value
800 if cgversions: # 3.1 and 3.2 ship with an empty value
801 cgversions = [v for v in cgversions
801 cgversions = [v for v in cgversions
802 if v in changegroup.supportedoutgoingversions(
802 if v in changegroup.supportedoutgoingversions(
803 pushop.repo)]
803 pushop.repo)]
804 if not cgversions:
804 if not cgversions:
805 raise ValueError(_('no common changegroup version'))
805 raise ValueError(_('no common changegroup version'))
806 version = max(cgversions)
806 version = max(cgversions)
807 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
807 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
808 'push')
808 'push')
809 cgpart = bundler.newpart('changegroup', data=cgstream)
809 cgpart = bundler.newpart('changegroup', data=cgstream)
810 if cgversions:
810 if cgversions:
811 cgpart.addparam('version', version)
811 cgpart.addparam('version', version)
812 if 'treemanifest' in pushop.repo.requirements:
812 if 'treemanifest' in pushop.repo.requirements:
813 cgpart.addparam('treemanifest', '1')
813 cgpart.addparam('treemanifest', '1')
814 def handlereply(op):
814 def handlereply(op):
815 """extract addchangegroup returns from server reply"""
815 """extract addchangegroup returns from server reply"""
816 cgreplies = op.records.getreplies(cgpart.id)
816 cgreplies = op.records.getreplies(cgpart.id)
817 assert len(cgreplies['changegroup']) == 1
817 assert len(cgreplies['changegroup']) == 1
818 pushop.cgresult = cgreplies['changegroup'][0]['return']
818 pushop.cgresult = cgreplies['changegroup'][0]['return']
819 return handlereply
819 return handlereply
820
820
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

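# Configuration sketch (hedged): the ``devel.legacy.exchange`` value read in
# _pushb2phases() and _pushb2bookmarks() can force the older pushkey-based
# exchange instead of the binary parts, e.g. in an hgrc used for testing.
# The exact value list below is an assumption based on the strings this
# module checks for ('phases' and 'bookmarks'):
#
#   [devel]
#   legacy.exchange = phases, bookmarks
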
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

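# Behaviour sketch for _bmaction() (hex node strings shortened for clarity;
# this is illustrative, not a doctest from the original source):
#
#   _bmaction('', 'c3f1...')        -> 'export'   # bookmark is new on the remote
#   _bmaction('c3f1...', '')        -> 'delete'   # bookmark removed locally
#   _bmaction('c3f1...', '9a2b...') -> 'update'   # bookmark moved
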
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # the bundle was accepted; report each bookmark action as performed
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

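# Usage sketch for the pushvars part built above (hedged; the hook variable
# naming is an assumption, not confirmed by this file): the client sends
# KEY=VALUE pairs with ``hg push --pushvars`` and server-side hooks can read
# them from their environment, roughly like this:
#
#   $ hg push --pushvars "BYPASS_REVIEW=1"
#
#   # server-side .hg/hgrc (illustrative)
#   [hooks]
#   pretxnchangegroup.review = test "$HG_USERVAR_BYPASS_REVIEW" = "1"
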
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The payload is assembled from the registered bundle2 part generators
    (changegroup, phases, bookmarks, obsolescence markers, ...) and will keep
    evolving in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

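# Configuration sketch (hedged): the ``experimental.bundle2.pushback`` option
# checked at the top of _pushbundle2() allows the server to send parts back
# inside the reply bundle, which are then applied within the push transaction
# (``trgetter`` above). A client would opt in with something like:
#
#   [experimental]
#   bundle2.pushback = True
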
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in the issue 3871 case!
        # We drop the phase synchronisation that would otherwise be done as a
        # courtesy to publish changesets that are possibly still draft on the
        # remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

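# Usage sketch (illustrative only): ``pull()`` below drives this class
# manually, and the same pattern applies anywhere else it might be reused:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       tr = trmanager.transaction()   # transaction is created lazily here
#       ...                            # apply data inside the transaction
#       trmanager.close()              # commit if everything succeeded
#   finally:
#       trmanager.release()            # roll back if close() was not reached
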
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        remotenames.pullremotenames(repo, remote)

    return pullop

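# Illustrative call sketch (not from the original source): an extension or
# script that already holds a local ``repo`` can drive a pull directly. The
# URL is a placeholder and the exact result handling is an assumption.
#
#   from mercurial import hg, exchange
#
#   other = hg.peer(repo.ui, {}, 'https://hg.example.org/project')
#   pullop = exchange.pull(repo, other, heads=None)   # None pulls everything
#   repo.ui.status('changegroup result: %r\n' % pullop.cgresult)
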
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

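# Registration sketch (hypothetical step name): a new discovery step defined
# by an extension hooks in exactly like the core steps below; wrapping an
# existing step goes through ``pulldiscoverymapping`` instead, as the
# docstring above notes.
#
#   @pulldiscovery('myext:listnamespaces')
#   def _pulldiscoverynamespaces(pullop):
#       # stash extra remote data on the pulloperation for later steps
#       pullop.myextnamespaces = pullop.remote.listkeys('namespaces')
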
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    The changegroup is the core payload; phase, bookmark and obsolescence
    data are also requested through the same bundle when the remote
    advertises support for them."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch

    ui = pullop.repo.ui
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
    if (not legacyphase and hasbinaryphase):
        kwargs['phases'] = True
        pullop.stepsdone.add('phases')

    if 'listkeys' in pullop.remotebundle2caps:
        if 'phases' not in pullop.stepsdone:
            kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # process phase changes
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # process bookmark updates
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data was either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

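# Extension sketch (hedged): the empty hook above is the intended place for
# extensions to add arguments to the getbundle call, typically by wrapping it.
# The extra kwargs key below is hypothetical.
#
#   from mercurial import exchange, extensions
#
#   def _myextraprepare(orig, pullop, kwargs):
#       kwargs['x-myext-data'] = True   # hypothetical extra bundle2 argument
#       return orig(pullop, kwargs)
#
#   def extsetup(ui):
#       extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                               _myextraprepare)
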
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break a future useful rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

1621 def _pullobsolete(pullop):
1621 def _pullobsolete(pullop):
1622 """utility function to pull obsolete markers from a remote
1622 """utility function to pull obsolete markers from a remote
1623
1623
1624 The `gettransaction` argument is a function that returns the pull transaction,
1624 The `gettransaction` argument is a function that returns the pull transaction,
1625 creating one if necessary. We return the transaction to inform the calling code
1625 creating one if necessary. We return the transaction to inform the calling code
1626 that a new transaction has been created (when applicable).
1626 that a new transaction has been created (when applicable).
1627
1627
1628 Exists mostly to allow overriding for experimentation purposes"""
1628 Exists mostly to allow overriding for experimentation purposes"""
1629 if 'obsmarkers' in pullop.stepsdone:
1629 if 'obsmarkers' in pullop.stepsdone:
1630 return
1630 return
1631 pullop.stepsdone.add('obsmarkers')
1631 pullop.stepsdone.add('obsmarkers')
1632 tr = None
1632 tr = None
1633 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1633 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1634 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1634 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1635 remoteobs = pullop.remote.listkeys('obsolete')
1635 remoteobs = pullop.remote.listkeys('obsolete')
1636 if 'dump0' in remoteobs:
1636 if 'dump0' in remoteobs:
1637 tr = pullop.gettransaction()
1637 tr = pullop.gettransaction()
1638 markers = []
1638 markers = []
1639 for key in sorted(remoteobs, reverse=True):
1639 for key in sorted(remoteobs, reverse=True):
1640 if key.startswith('dump'):
1640 if key.startswith('dump'):
1641 data = util.b85decode(remoteobs[key])
1641 data = util.b85decode(remoteobs[key])
1642 version, newmarks = obsolete._readmarkers(data)
1642 version, newmarks = obsolete._readmarkers(data)
1643 markers += newmarks
1643 markers += newmarks
1644 if markers:
1644 if markers:
1645 pullop.repo.obsstore.add(tr, markers)
1645 pullop.repo.obsstore.add(tr, markers)
1646 pullop.repo.invalidatevolatilesets()
1646 pullop.repo.invalidatevolatilesets()
1647 return tr
1647 return tr
1648
1648
1649 def caps20to10(repo):
1649 def caps20to10(repo):
1650 """return a set with appropriate options to use bundle20 during getbundle"""
1650 """return a set with appropriate options to use bundle20 during getbundle"""
1651 caps = {'HG20'}
1651 caps = {'HG20'}
1652 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1652 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1653 caps.add('bundle2=' + urlreq.quote(capsblob))
1653 caps.add('bundle2=' + urlreq.quote(capsblob))
1654 return caps
1654 return caps
1655
1655
1656 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1656 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1657 getbundle2partsorder = []
1657 getbundle2partsorder = []
1658
1658
1659 # Mapping between step name and function
1659 # Mapping between step name and function
1660 #
1660 #
1661 # This exists to help extensions wrap steps if necessary
1661 # This exists to help extensions wrap steps if necessary
1662 getbundle2partsmapping = {}
1662 getbundle2partsmapping = {}
1663
1663
1664 def getbundle2partsgenerator(stepname, idx=None):
1664 def getbundle2partsgenerator(stepname, idx=None):
1665 """decorator for function generating bundle2 part for getbundle
1665 """decorator for function generating bundle2 part for getbundle
1666
1666
1667 The function is added to the step -> function mapping and appended to the
1667 The function is added to the step -> function mapping and appended to the
1668 list of steps. Beware that decorated functions will be added in order
1668 list of steps. Beware that decorated functions will be added in order
1669 (this may matter).
1669 (this may matter).
1670
1670
1671 You can only use this decorator for new steps; if you want to wrap a step
1671 You can only use this decorator for new steps; if you want to wrap a step
1672 from an extension, attach to the getbundle2partsmapping dictionary directly."""
1672 from an extension, attach to the getbundle2partsmapping dictionary directly."""
1673 def dec(func):
1673 def dec(func):
1674 assert stepname not in getbundle2partsmapping
1674 assert stepname not in getbundle2partsmapping
1675 getbundle2partsmapping[stepname] = func
1675 getbundle2partsmapping[stepname] = func
1676 if idx is None:
1676 if idx is None:
1677 getbundle2partsorder.append(stepname)
1677 getbundle2partsorder.append(stepname)
1678 else:
1678 else:
1679 getbundle2partsorder.insert(idx, stepname)
1679 getbundle2partsorder.insert(idx, stepname)
1680 return func
1680 return func
1681 return dec
1681 return dec
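
# Illustrative sketch (not part of this file): an extension could register an
# additional part generator through the decorator above. The part name
# 'myextension-data' and the 'myextensiondata' getbundle argument are invented
# for the example.
#
#   from mercurial import exchange
#
#   @exchange.getbundle2partsgenerator('myextension-data')
#   def _getbundlemyextensionpart(bundler, repo, source, bundlecaps=None,
#                                 b2caps=None, **kwargs):
#       """add a hypothetical extension part when the client asked for it"""
#       if not kwargs.get('myextensiondata', False):
#           return
#       bundler.newpart('myextension-data', data='payload', mandatory=False)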
1682
1682
1683 def bundle2requested(bundlecaps):
1683 def bundle2requested(bundlecaps):
1684 if bundlecaps is not None:
1684 if bundlecaps is not None:
1685 return any(cap.startswith('HG2') for cap in bundlecaps)
1685 return any(cap.startswith('HG2') for cap in bundlecaps)
1686 return False
1686 return False
1687
1687
1688 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1688 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1689 **kwargs):
1689 **kwargs):
1690 """Return chunks constituting a bundle's raw data.
1690 """Return chunks constituting a bundle's raw data.
1691
1691
1692 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1692 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1693 passed.
1693 passed.
1694
1694
1695 Returns an iterator over raw chunks (of varying sizes).
1695 Returns an iterator over raw chunks (of varying sizes).
1696 """
1696 """
1697 kwargs = pycompat.byteskwargs(kwargs)
1697 kwargs = pycompat.byteskwargs(kwargs)
1698 usebundle2 = bundle2requested(bundlecaps)
1698 usebundle2 = bundle2requested(bundlecaps)
1699 # bundle10 case
1699 # bundle10 case
1700 if not usebundle2:
1700 if not usebundle2:
1701 if bundlecaps and not kwargs.get('cg', True):
1701 if bundlecaps and not kwargs.get('cg', True):
1702 raise ValueError(_('request for bundle10 must include changegroup'))
1702 raise ValueError(_('request for bundle10 must include changegroup'))
1703
1703
1704 if kwargs:
1704 if kwargs:
1705 raise ValueError(_('unsupported getbundle arguments: %s')
1705 raise ValueError(_('unsupported getbundle arguments: %s')
1706 % ', '.join(sorted(kwargs.keys())))
1706 % ', '.join(sorted(kwargs.keys())))
1707 outgoing = _computeoutgoing(repo, heads, common)
1707 outgoing = _computeoutgoing(repo, heads, common)
1708 return changegroup.makestream(repo, outgoing, '01', source,
1708 return changegroup.makestream(repo, outgoing, '01', source,
1709 bundlecaps=bundlecaps)
1709 bundlecaps=bundlecaps)
1710
1710
1711 # bundle20 case
1711 # bundle20 case
1712 b2caps = {}
1712 b2caps = {}
1713 for bcaps in bundlecaps:
1713 for bcaps in bundlecaps:
1714 if bcaps.startswith('bundle2='):
1714 if bcaps.startswith('bundle2='):
1715 blob = urlreq.unquote(bcaps[len('bundle2='):])
1715 blob = urlreq.unquote(bcaps[len('bundle2='):])
1716 b2caps.update(bundle2.decodecaps(blob))
1716 b2caps.update(bundle2.decodecaps(blob))
1717 bundler = bundle2.bundle20(repo.ui, b2caps)
1717 bundler = bundle2.bundle20(repo.ui, b2caps)
1718
1718
1719 kwargs['heads'] = heads
1719 kwargs['heads'] = heads
1720 kwargs['common'] = common
1720 kwargs['common'] = common
1721
1721
1722 for name in getbundle2partsorder:
1722 for name in getbundle2partsorder:
1723 func = getbundle2partsmapping[name]
1723 func = getbundle2partsmapping[name]
1724 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1724 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1725 **pycompat.strkwargs(kwargs))
1725 **pycompat.strkwargs(kwargs))
1726
1726
1727 return bundler.getchunks()
1727 return bundler.getchunks()
1728
1728
1729 @getbundle2partsgenerator('changegroup')
1729 @getbundle2partsgenerator('changegroup')
1730 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1730 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1731 b2caps=None, heads=None, common=None, **kwargs):
1731 b2caps=None, heads=None, common=None, **kwargs):
1732 """add a changegroup part to the requested bundle"""
1732 """add a changegroup part to the requested bundle"""
1733 cgstream = None
1733 cgstream = None
1734 if kwargs.get('cg', True):
1734 if kwargs.get('cg', True):
1735 # build changegroup bundle here.
1735 # build changegroup bundle here.
1736 version = '01'
1736 version = '01'
1737 cgversions = b2caps.get('changegroup')
1737 cgversions = b2caps.get('changegroup')
1738 if cgversions: # 3.1 and 3.2 ship with an empty value
1738 if cgversions: # 3.1 and 3.2 ship with an empty value
1739 cgversions = [v for v in cgversions
1739 cgversions = [v for v in cgversions
1740 if v in changegroup.supportedoutgoingversions(repo)]
1740 if v in changegroup.supportedoutgoingversions(repo)]
1741 if not cgversions:
1741 if not cgversions:
1742 raise ValueError(_('no common changegroup version'))
1742 raise ValueError(_('no common changegroup version'))
1743 version = max(cgversions)
1743 version = max(cgversions)
1744 outgoing = _computeoutgoing(repo, heads, common)
1744 outgoing = _computeoutgoing(repo, heads, common)
1745 if outgoing.missing:
1745 if outgoing.missing:
1746 cgstream = changegroup.makestream(repo, outgoing, version, source,
1746 cgstream = changegroup.makestream(repo, outgoing, version, source,
1747 bundlecaps=bundlecaps)
1747 bundlecaps=bundlecaps)
1748
1748
1749 if cgstream:
1749 if cgstream:
1750 part = bundler.newpart('changegroup', data=cgstream)
1750 part = bundler.newpart('changegroup', data=cgstream)
1751 if cgversions:
1751 if cgversions:
1752 part.addparam('version', version)
1752 part.addparam('version', version)
1753 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1753 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1754 mandatory=False)
1754 mandatory=False)
1755 if 'treemanifest' in repo.requirements:
1755 if 'treemanifest' in repo.requirements:
1756 part.addparam('treemanifest', '1')
1756 part.addparam('treemanifest', '1')
1757
1757
1758 @getbundle2partsgenerator('bookmarks')
1759 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1760 b2caps=None, **kwargs):
1761 """add a bookmark part to the requested bundle"""
1762 if not kwargs.get('bookmarks', False):
1763 return
1764 if 'bookmarks' not in b2caps:
1765 raise ValueError(_('no common bookmarks exchange method'))
1766 books = bookmod.listbinbookmarks(repo)
1767 data = bookmod.binaryencode(books)
1768 if data:
1769 bundler.newpart('bookmarks', data=data)
1770
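# Illustrative sketch (not part of this change): a caller could request a
# bundle2 stream carrying the new 'bookmarks' part by passing the boolean
# 'bookmarks' argument to getbundlechunks(). This assumes the advertised
# bundle2 capabilities include 'bookmarks'; otherwise the check above raises
# ValueError.
#
#   caps = caps20to10(repo)
#   chunks = getbundlechunks(repo, 'pull', heads=repo.heads(),
#                            common=[nullid], bundlecaps=caps,
#                            bookmarks=True)
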
1758 @getbundle2partsgenerator('listkeys')
1771 @getbundle2partsgenerator('listkeys')
1759 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1772 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1760 b2caps=None, **kwargs):
1773 b2caps=None, **kwargs):
1761 """add parts containing listkeys namespaces to the requested bundle"""
1774 """add parts containing listkeys namespaces to the requested bundle"""
1762 listkeys = kwargs.get('listkeys', ())
1775 listkeys = kwargs.get('listkeys', ())
1763 for namespace in listkeys:
1776 for namespace in listkeys:
1764 part = bundler.newpart('listkeys')
1777 part = bundler.newpart('listkeys')
1765 part.addparam('namespace', namespace)
1778 part.addparam('namespace', namespace)
1766 keys = repo.listkeys(namespace).items()
1779 keys = repo.listkeys(namespace).items()
1767 part.data = pushkey.encodekeys(keys)
1780 part.data = pushkey.encodekeys(keys)
1768
1781
1769 @getbundle2partsgenerator('obsmarkers')
1782 @getbundle2partsgenerator('obsmarkers')
1770 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1783 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1771 b2caps=None, heads=None, **kwargs):
1784 b2caps=None, heads=None, **kwargs):
1772 """add an obsolescence markers part to the requested bundle"""
1785 """add an obsolescence markers part to the requested bundle"""
1773 if kwargs.get('obsmarkers', False):
1786 if kwargs.get('obsmarkers', False):
1774 if heads is None:
1787 if heads is None:
1775 heads = repo.heads()
1788 heads = repo.heads()
1776 subset = [c.node() for c in repo.set('::%ln', heads)]
1789 subset = [c.node() for c in repo.set('::%ln', heads)]
1777 markers = repo.obsstore.relevantmarkers(subset)
1790 markers = repo.obsstore.relevantmarkers(subset)
1778 markers = sorted(markers)
1791 markers = sorted(markers)
1779 bundle2.buildobsmarkerspart(bundler, markers)
1792 bundle2.buildobsmarkerspart(bundler, markers)
1780
1793
1781 @getbundle2partsgenerator('phases')
1794 @getbundle2partsgenerator('phases')
1782 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1795 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1783 b2caps=None, heads=None, **kwargs):
1796 b2caps=None, heads=None, **kwargs):
1784 """add phase heads part to the requested bundle"""
1797 """add phase heads part to the requested bundle"""
1785 if kwargs.get('phases', False):
1798 if kwargs.get('phases', False):
1786 if not 'heads' in b2caps.get('phases'):
1799 if not 'heads' in b2caps.get('phases'):
1787 raise ValueError(_('no common phases exchange method'))
1800 raise ValueError(_('no common phases exchange method'))
1788 if heads is None:
1801 if heads is None:
1789 heads = repo.heads()
1802 heads = repo.heads()
1790
1803
1791 headsbyphase = collections.defaultdict(set)
1804 headsbyphase = collections.defaultdict(set)
1792 if repo.publishing():
1805 if repo.publishing():
1793 headsbyphase[phases.public] = heads
1806 headsbyphase[phases.public] = heads
1794 else:
1807 else:
1795 # find the appropriate heads to move
1808 # find the appropriate heads to move
1796
1809
1797 phase = repo._phasecache.phase
1810 phase = repo._phasecache.phase
1798 node = repo.changelog.node
1811 node = repo.changelog.node
1799 rev = repo.changelog.rev
1812 rev = repo.changelog.rev
1800 for h in heads:
1813 for h in heads:
1801 headsbyphase[phase(repo, rev(h))].add(h)
1814 headsbyphase[phase(repo, rev(h))].add(h)
1802 seenphases = list(headsbyphase.keys())
1815 seenphases = list(headsbyphase.keys())
1803
1816
1804 # We do not handle anything but public and draft phases for now
1817 # We do not handle anything but public and draft phases for now
1805 if seenphases:
1818 if seenphases:
1806 assert max(seenphases) <= phases.draft
1819 assert max(seenphases) <= phases.draft
1807
1820
1808 # if client is pulling non-public changesets, we need to find
1821 # if client is pulling non-public changesets, we need to find
1809 # intermediate public heads.
1822 # intermediate public heads.
1810 draftheads = headsbyphase.get(phases.draft, set())
1823 draftheads = headsbyphase.get(phases.draft, set())
1811 if draftheads:
1824 if draftheads:
1812 publicheads = headsbyphase.get(phases.public, set())
1825 publicheads = headsbyphase.get(phases.public, set())
1813
1826
1814 revset = 'heads(only(%ln, %ln) and public())'
1827 revset = 'heads(only(%ln, %ln) and public())'
1815 extraheads = repo.revs(revset, draftheads, publicheads)
1828 extraheads = repo.revs(revset, draftheads, publicheads)
1816 for r in extraheads:
1829 for r in extraheads:
1817 headsbyphase[phases.public].add(node(r))
1830 headsbyphase[phases.public].add(node(r))
1818
1831
1819 # transform the data into the format used by the encoding function
1832 # transform the data into the format used by the encoding function
1820 phasemapping = []
1833 phasemapping = []
1821 for phase in phases.allphases:
1834 for phase in phases.allphases:
1822 phasemapping.append(sorted(headsbyphase[phase]))
1835 phasemapping.append(sorted(headsbyphase[phase]))
1823
1836
1824 # generate the actual part
1837 # generate the actual part
1825 phasedata = phases.binaryencode(phasemapping)
1838 phasedata = phases.binaryencode(phasemapping)
1826 bundler.newpart('phase-heads', data=phasedata)
1839 bundler.newpart('phase-heads', data=phasedata)
1827
1840
1828 @getbundle2partsgenerator('hgtagsfnodes')
1841 @getbundle2partsgenerator('hgtagsfnodes')
1829 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1842 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1830 b2caps=None, heads=None, common=None,
1843 b2caps=None, heads=None, common=None,
1831 **kwargs):
1844 **kwargs):
1832 """Transfer the .hgtags filenodes mapping.
1845 """Transfer the .hgtags filenodes mapping.
1833
1846
1834 Only values for heads in this bundle will be transferred.
1847 Only values for heads in this bundle will be transferred.
1835
1848
1836 The part data consists of pairs of 20 byte changeset node and .hgtags
1849 The part data consists of pairs of 20 byte changeset node and .hgtags
1837 filenodes raw values.
1850 filenodes raw values.
1838 """
1851 """
1839 # Don't send unless:
1852 # Don't send unless:
1840 # - changesets are being exchanged,
1853 # - changesets are being exchanged,
1841 # - the client supports it.
1854 # - the client supports it.
1842 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1855 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1843 return
1856 return
1844
1857
1845 outgoing = _computeoutgoing(repo, heads, common)
1858 outgoing = _computeoutgoing(repo, heads, common)
1846 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1859 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1847
1860
1848 def check_heads(repo, their_heads, context):
1861 def check_heads(repo, their_heads, context):
1849 """check if the heads of a repo have been modified
1862 """check if the heads of a repo have been modified
1850
1863
1851 Used by peer for unbundling.
1864 Used by peer for unbundling.
1852 """
1865 """
1853 heads = repo.heads()
1866 heads = repo.heads()
1854 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1867 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1855 if not (their_heads == ['force'] or their_heads == heads or
1868 if not (their_heads == ['force'] or their_heads == heads or
1856 their_heads == ['hashed', heads_hash]):
1869 their_heads == ['hashed', heads_hash]):
1857 # someone else committed/pushed/unbundled while we
1870 # someone else committed/pushed/unbundled while we
1858 # were transferring data
1871 # were transferring data
1859 raise error.PushRaced('repository changed while %s - '
1872 raise error.PushRaced('repository changed while %s - '
1860 'please try again' % context)
1873 'please try again' % context)
1861
1874
1862 def unbundle(repo, cg, heads, source, url):
1875 def unbundle(repo, cg, heads, source, url):
1863 """Apply a bundle to a repo.
1876 """Apply a bundle to a repo.
1864
1877
1865 this function makes sure the repo is locked during the application and has a
1878 this function makes sure the repo is locked during the application and has a
1866 mechanism to check that no push race occurred between the creation of the
1879 mechanism to check that no push race occurred between the creation of the
1867 bundle and its application.
1880 bundle and its application.
1868
1881
1869 If the push was raced, a PushRaced exception is raised."""
1882 If the push was raced, a PushRaced exception is raised."""
1870 r = 0
1883 r = 0
1871 # need a transaction when processing a bundle2 stream
1884 # need a transaction when processing a bundle2 stream
1872 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1885 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1873 lockandtr = [None, None, None]
1886 lockandtr = [None, None, None]
1874 recordout = None
1887 recordout = None
1875 # quick fix for output mismatch with bundle2 in 3.4
1888 # quick fix for output mismatch with bundle2 in 3.4
1876 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1889 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1877 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1890 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1878 captureoutput = True
1891 captureoutput = True
1879 try:
1892 try:
1880 # note: outside bundle1, 'heads' is expected to be empty and this
1893 # note: outside bundle1, 'heads' is expected to be empty and this
1881 # 'check_heads' call will be a no-op
1894 # 'check_heads' call will be a no-op
1882 check_heads(repo, heads, 'uploading changes')
1895 check_heads(repo, heads, 'uploading changes')
1883 # push can proceed
1896 # push can proceed
1884 if not isinstance(cg, bundle2.unbundle20):
1897 if not isinstance(cg, bundle2.unbundle20):
1885 # legacy case: bundle1 (changegroup 01)
1898 # legacy case: bundle1 (changegroup 01)
1886 txnname = "\n".join([source, util.hidepassword(url)])
1899 txnname = "\n".join([source, util.hidepassword(url)])
1887 with repo.lock(), repo.transaction(txnname) as tr:
1900 with repo.lock(), repo.transaction(txnname) as tr:
1888 op = bundle2.applybundle(repo, cg, tr, source, url)
1901 op = bundle2.applybundle(repo, cg, tr, source, url)
1889 r = bundle2.combinechangegroupresults(op)
1902 r = bundle2.combinechangegroupresults(op)
1890 else:
1903 else:
1891 r = None
1904 r = None
1892 try:
1905 try:
1893 def gettransaction():
1906 def gettransaction():
1894 if not lockandtr[2]:
1907 if not lockandtr[2]:
1895 lockandtr[0] = repo.wlock()
1908 lockandtr[0] = repo.wlock()
1896 lockandtr[1] = repo.lock()
1909 lockandtr[1] = repo.lock()
1897 lockandtr[2] = repo.transaction(source)
1910 lockandtr[2] = repo.transaction(source)
1898 lockandtr[2].hookargs['source'] = source
1911 lockandtr[2].hookargs['source'] = source
1899 lockandtr[2].hookargs['url'] = url
1912 lockandtr[2].hookargs['url'] = url
1900 lockandtr[2].hookargs['bundle2'] = '1'
1913 lockandtr[2].hookargs['bundle2'] = '1'
1901 return lockandtr[2]
1914 return lockandtr[2]
1902
1915
1903 # Do greedy locking by default until we're satisfied with lazy
1916 # Do greedy locking by default until we're satisfied with lazy
1904 # locking.
1917 # locking.
1905 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1918 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1906 gettransaction()
1919 gettransaction()
1907
1920
1908 op = bundle2.bundleoperation(repo, gettransaction,
1921 op = bundle2.bundleoperation(repo, gettransaction,
1909 captureoutput=captureoutput)
1922 captureoutput=captureoutput)
1910 try:
1923 try:
1911 op = bundle2.processbundle(repo, cg, op=op)
1924 op = bundle2.processbundle(repo, cg, op=op)
1912 finally:
1925 finally:
1913 r = op.reply
1926 r = op.reply
1914 if captureoutput and r is not None:
1927 if captureoutput and r is not None:
1915 repo.ui.pushbuffer(error=True, subproc=True)
1928 repo.ui.pushbuffer(error=True, subproc=True)
1916 def recordout(output):
1929 def recordout(output):
1917 r.newpart('output', data=output, mandatory=False)
1930 r.newpart('output', data=output, mandatory=False)
1918 if lockandtr[2] is not None:
1931 if lockandtr[2] is not None:
1919 lockandtr[2].close()
1932 lockandtr[2].close()
1920 except BaseException as exc:
1933 except BaseException as exc:
1921 exc.duringunbundle2 = True
1934 exc.duringunbundle2 = True
1922 if captureoutput and r is not None:
1935 if captureoutput and r is not None:
1923 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1936 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1924 def recordout(output):
1937 def recordout(output):
1925 part = bundle2.bundlepart('output', data=output,
1938 part = bundle2.bundlepart('output', data=output,
1926 mandatory=False)
1939 mandatory=False)
1927 parts.append(part)
1940 parts.append(part)
1928 raise
1941 raise
1929 finally:
1942 finally:
1930 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1943 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1931 if recordout is not None:
1944 if recordout is not None:
1932 recordout(repo.ui.popbuffer())
1945 recordout(repo.ui.popbuffer())
1933 return r
1946 return r
1934
1947
1935 def _maybeapplyclonebundle(pullop):
1948 def _maybeapplyclonebundle(pullop):
1936 """Apply a clone bundle from a remote, if possible."""
1949 """Apply a clone bundle from a remote, if possible."""
1937
1950
1938 repo = pullop.repo
1951 repo = pullop.repo
1939 remote = pullop.remote
1952 remote = pullop.remote
1940
1953
1941 if not repo.ui.configbool('ui', 'clonebundles'):
1954 if not repo.ui.configbool('ui', 'clonebundles'):
1942 return
1955 return
1943
1956
1944 # Only run if local repo is empty.
1957 # Only run if local repo is empty.
1945 if len(repo):
1958 if len(repo):
1946 return
1959 return
1947
1960
1948 if pullop.heads:
1961 if pullop.heads:
1949 return
1962 return
1950
1963
1951 if not remote.capable('clonebundles'):
1964 if not remote.capable('clonebundles'):
1952 return
1965 return
1953
1966
1954 res = remote._call('clonebundles')
1967 res = remote._call('clonebundles')
1955
1968
1956 # If we call the wire protocol command, that's good enough to record the
1969 # If we call the wire protocol command, that's good enough to record the
1957 # attempt.
1970 # attempt.
1958 pullop.clonebundleattempted = True
1971 pullop.clonebundleattempted = True
1959
1972
1960 entries = parseclonebundlesmanifest(repo, res)
1973 entries = parseclonebundlesmanifest(repo, res)
1961 if not entries:
1974 if not entries:
1962 repo.ui.note(_('no clone bundles available on remote; '
1975 repo.ui.note(_('no clone bundles available on remote; '
1963 'falling back to regular clone\n'))
1976 'falling back to regular clone\n'))
1964 return
1977 return
1965
1978
1966 entries = filterclonebundleentries(
1979 entries = filterclonebundleentries(
1967 repo, entries, streamclonerequested=pullop.streamclonerequested)
1980 repo, entries, streamclonerequested=pullop.streamclonerequested)
1968
1981
1969 if not entries:
1982 if not entries:
1970 # There is a thundering herd concern here. However, if a server
1983 # There is a thundering herd concern here. However, if a server
1971 # operator doesn't advertise bundles appropriate for its clients,
1984 # operator doesn't advertise bundles appropriate for its clients,
1972 # they deserve what's coming. Furthermore, from a client's
1985 # they deserve what's coming. Furthermore, from a client's
1973 # perspective, no automatic fallback would mean not being able to
1986 # perspective, no automatic fallback would mean not being able to
1974 # clone!
1987 # clone!
1975 repo.ui.warn(_('no compatible clone bundles available on server; '
1988 repo.ui.warn(_('no compatible clone bundles available on server; '
1976 'falling back to regular clone\n'))
1989 'falling back to regular clone\n'))
1977 repo.ui.warn(_('(you may want to report this to the server '
1990 repo.ui.warn(_('(you may want to report this to the server '
1978 'operator)\n'))
1991 'operator)\n'))
1979 return
1992 return
1980
1993
1981 entries = sortclonebundleentries(repo.ui, entries)
1994 entries = sortclonebundleentries(repo.ui, entries)
1982
1995
1983 url = entries[0]['URL']
1996 url = entries[0]['URL']
1984 repo.ui.status(_('applying clone bundle from %s\n') % url)
1997 repo.ui.status(_('applying clone bundle from %s\n') % url)
1985 if trypullbundlefromurl(repo.ui, repo, url):
1998 if trypullbundlefromurl(repo.ui, repo, url):
1986 repo.ui.status(_('finished applying clone bundle\n'))
1999 repo.ui.status(_('finished applying clone bundle\n'))
1987 # Bundle failed.
2000 # Bundle failed.
1988 #
2001 #
1989 # We abort by default to avoid the thundering herd of
2002 # We abort by default to avoid the thundering herd of
1990 # clients flooding a server that was expecting expensive
2003 # clients flooding a server that was expecting expensive
1991 # clone load to be offloaded.
2004 # clone load to be offloaded.
1992 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2005 elif repo.ui.configbool('ui', 'clonebundlefallback'):
1993 repo.ui.warn(_('falling back to normal clone\n'))
2006 repo.ui.warn(_('falling back to normal clone\n'))
1994 else:
2007 else:
1995 raise error.Abort(_('error applying bundle'),
2008 raise error.Abort(_('error applying bundle'),
1996 hint=_('if this error persists, consider contacting '
2009 hint=_('if this error persists, consider contacting '
1997 'the server operator or disable clone '
2010 'the server operator or disable clone '
1998 'bundles via '
2011 'bundles via '
1999 '"--config ui.clonebundles=false"'))
2012 '"--config ui.clonebundles=false"'))
2000
2013
2001 def parseclonebundlesmanifest(repo, s):
2014 def parseclonebundlesmanifest(repo, s):
2002 """Parses the raw text of a clone bundles manifest.
2015 """Parses the raw text of a clone bundles manifest.
2003
2016
2004 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2017 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2005 to the URL and other keys are the attributes for the entry.
2018 to the URL and other keys are the attributes for the entry.
2006 """
2019 """
2007 m = []
2020 m = []
2008 for line in s.splitlines():
2021 for line in s.splitlines():
2009 fields = line.split()
2022 fields = line.split()
2010 if not fields:
2023 if not fields:
2011 continue
2024 continue
2012 attrs = {'URL': fields[0]}
2025 attrs = {'URL': fields[0]}
2013 for rawattr in fields[1:]:
2026 for rawattr in fields[1:]:
2014 key, value = rawattr.split('=', 1)
2027 key, value = rawattr.split('=', 1)
2015 key = urlreq.unquote(key)
2028 key = urlreq.unquote(key)
2016 value = urlreq.unquote(value)
2029 value = urlreq.unquote(value)
2017 attrs[key] = value
2030 attrs[key] = value
2018
2031
2019 # Parse BUNDLESPEC into components. This makes client-side
2032 # Parse BUNDLESPEC into components. This makes client-side
2020 # preferences easier to specify since you can prefer a single
2033 # preferences easier to specify since you can prefer a single
2021 # component of the BUNDLESPEC.
2034 # component of the BUNDLESPEC.
2022 if key == 'BUNDLESPEC':
2035 if key == 'BUNDLESPEC':
2023 try:
2036 try:
2024 comp, version, params = parsebundlespec(repo, value,
2037 comp, version, params = parsebundlespec(repo, value,
2025 externalnames=True)
2038 externalnames=True)
2026 attrs['COMPRESSION'] = comp
2039 attrs['COMPRESSION'] = comp
2027 attrs['VERSION'] = version
2040 attrs['VERSION'] = version
2028 except error.InvalidBundleSpecification:
2041 except error.InvalidBundleSpecification:
2029 pass
2042 pass
2030 except error.UnsupportedBundleSpecification:
2043 except error.UnsupportedBundleSpecification:
2031 pass
2044 pass
2032
2045
2033 m.append(attrs)
2046 m.append(attrs)
2034
2047
2035 return m
2048 return m
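
# Illustrative example (not from the source): a manifest line such as
#
#   https://hg.example.com/bundle.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would be parsed into a single entry roughly equivalent to:
#
#   {'URL': 'https://hg.example.com/bundle.hg',
#    'BUNDLESPEC': 'gzip-v2',
#    'COMPRESSION': 'gzip',
#    'VERSION': 'v2',
#    'REQUIRESNI': 'true'}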
2036
2049
2037 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2050 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2038 """Remove incompatible clone bundle manifest entries.
2051 """Remove incompatible clone bundle manifest entries.
2039
2052
2040 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2053 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2041 and returns a new list consisting of only the entries that this client
2054 and returns a new list consisting of only the entries that this client
2042 should be able to apply.
2055 should be able to apply.
2043
2056
2044 There is no guarantee we'll be able to apply all returned entries because
2057 There is no guarantee we'll be able to apply all returned entries because
2045 the metadata we use to filter on may be missing or wrong.
2058 the metadata we use to filter on may be missing or wrong.
2046 """
2059 """
2047 newentries = []
2060 newentries = []
2048 for entry in entries:
2061 for entry in entries:
2049 spec = entry.get('BUNDLESPEC')
2062 spec = entry.get('BUNDLESPEC')
2050 if spec:
2063 if spec:
2051 try:
2064 try:
2052 comp, version, params = parsebundlespec(repo, spec, strict=True)
2065 comp, version, params = parsebundlespec(repo, spec, strict=True)
2053
2066
2054 # If a stream clone was requested, filter out non-streamclone
2067 # If a stream clone was requested, filter out non-streamclone
2055 # entries.
2068 # entries.
2056 if streamclonerequested and (comp != 'UN' or version != 's1'):
2069 if streamclonerequested and (comp != 'UN' or version != 's1'):
2057 repo.ui.debug('filtering %s because not a stream clone\n' %
2070 repo.ui.debug('filtering %s because not a stream clone\n' %
2058 entry['URL'])
2071 entry['URL'])
2059 continue
2072 continue
2060
2073
2061 except error.InvalidBundleSpecification as e:
2074 except error.InvalidBundleSpecification as e:
2062 repo.ui.debug(str(e) + '\n')
2075 repo.ui.debug(str(e) + '\n')
2063 continue
2076 continue
2064 except error.UnsupportedBundleSpecification as e:
2077 except error.UnsupportedBundleSpecification as e:
2065 repo.ui.debug('filtering %s because unsupported bundle '
2078 repo.ui.debug('filtering %s because unsupported bundle '
2066 'spec: %s\n' % (entry['URL'], str(e)))
2079 'spec: %s\n' % (entry['URL'], str(e)))
2067 continue
2080 continue
2068 # If we don't have a spec and requested a stream clone, we don't know
2081 # If we don't have a spec and requested a stream clone, we don't know
2069 # what the entry is so don't attempt to apply it.
2082 # what the entry is so don't attempt to apply it.
2070 elif streamclonerequested:
2083 elif streamclonerequested:
2071 repo.ui.debug('filtering %s because cannot determine if a stream '
2084 repo.ui.debug('filtering %s because cannot determine if a stream '
2072 'clone bundle\n' % entry['URL'])
2085 'clone bundle\n' % entry['URL'])
2073 continue
2086 continue
2074
2087
2075 if 'REQUIRESNI' in entry and not sslutil.hassni:
2088 if 'REQUIRESNI' in entry and not sslutil.hassni:
2076 repo.ui.debug('filtering %s because SNI not supported\n' %
2089 repo.ui.debug('filtering %s because SNI not supported\n' %
2077 entry['URL'])
2090 entry['URL'])
2078 continue
2091 continue
2079
2092
2080 newentries.append(entry)
2093 newentries.append(entry)
2081
2094
2082 return newentries
2095 return newentries
2083
2096
2084 class clonebundleentry(object):
2097 class clonebundleentry(object):
2085 """Represents an item in a clone bundles manifest.
2098 """Represents an item in a clone bundles manifest.
2086
2099
2087 This rich class is needed to support sorting since sorted() in Python 3
2100 This rich class is needed to support sorting since sorted() in Python 3
2088 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2101 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2089 won't work.
2102 won't work.
2090 """
2103 """
2091
2104
2092 def __init__(self, value, prefers):
2105 def __init__(self, value, prefers):
2093 self.value = value
2106 self.value = value
2094 self.prefers = prefers
2107 self.prefers = prefers
2095
2108
2096 def _cmp(self, other):
2109 def _cmp(self, other):
2097 for prefkey, prefvalue in self.prefers:
2110 for prefkey, prefvalue in self.prefers:
2098 avalue = self.value.get(prefkey)
2111 avalue = self.value.get(prefkey)
2099 bvalue = other.value.get(prefkey)
2112 bvalue = other.value.get(prefkey)
2100
2113
2101 # Special case for b missing attribute and a matches exactly.
2114 # Special case for b missing attribute and a matches exactly.
2102 if avalue is not None and bvalue is None and avalue == prefvalue:
2115 if avalue is not None and bvalue is None and avalue == prefvalue:
2103 return -1
2116 return -1
2104
2117
2105 # Special case for a missing attribute and b matches exactly.
2118 # Special case for a missing attribute and b matches exactly.
2106 if bvalue is not None and avalue is None and bvalue == prefvalue:
2119 if bvalue is not None and avalue is None and bvalue == prefvalue:
2107 return 1
2120 return 1
2108
2121
2109 # We can't compare unless attribute present on both.
2122 # We can't compare unless attribute present on both.
2110 if avalue is None or bvalue is None:
2123 if avalue is None or bvalue is None:
2111 continue
2124 continue
2112
2125
2113 # Same values should fall back to next attribute.
2126 # Same values should fall back to next attribute.
2114 if avalue == bvalue:
2127 if avalue == bvalue:
2115 continue
2128 continue
2116
2129
2117 # Exact matches come first.
2130 # Exact matches come first.
2118 if avalue == prefvalue:
2131 if avalue == prefvalue:
2119 return -1
2132 return -1
2120 if bvalue == prefvalue:
2133 if bvalue == prefvalue:
2121 return 1
2134 return 1
2122
2135
2123 # Fall back to next attribute.
2136 # Fall back to next attribute.
2124 continue
2137 continue
2125
2138
2126 # If we got here we couldn't sort by attributes and prefers. Fall
2139 # If we got here we couldn't sort by attributes and prefers. Fall
2127 # back to index order.
2140 # back to index order.
2128 return 0
2141 return 0
2129
2142
2130 def __lt__(self, other):
2143 def __lt__(self, other):
2131 return self._cmp(other) < 0
2144 return self._cmp(other) < 0
2132
2145
2133 def __gt__(self, other):
2146 def __gt__(self, other):
2134 return self._cmp(other) > 0
2147 return self._cmp(other) > 0
2135
2148
2136 def __eq__(self, other):
2149 def __eq__(self, other):
2137 return self._cmp(other) == 0
2150 return self._cmp(other) == 0
2138
2151
2139 def __le__(self, other):
2152 def __le__(self, other):
2140 return self._cmp(other) <= 0
2153 return self._cmp(other) <= 0
2141
2154
2142 def __ge__(self, other):
2155 def __ge__(self, other):
2143 return self._cmp(other) >= 0
2156 return self._cmp(other) >= 0
2144
2157
2145 def __ne__(self, other):
2158 def __ne__(self, other):
2146 return self._cmp(other) != 0
2159 return self._cmp(other) != 0
2147
2160
2148 def sortclonebundleentries(ui, entries):
2161 def sortclonebundleentries(ui, entries):
2149 prefers = ui.configlist('ui', 'clonebundleprefers')
2162 prefers = ui.configlist('ui', 'clonebundleprefers')
2150 if not prefers:
2163 if not prefers:
2151 return list(entries)
2164 return list(entries)
2152
2165
2153 prefers = [p.split('=', 1) for p in prefers]
2166 prefers = [p.split('=', 1) for p in prefers]
2154
2167
2155 items = sorted(clonebundleentry(v, prefers) for v in entries)
2168 items = sorted(clonebundleentry(v, prefers) for v in entries)
2156 return [i.value for i in items]
2169 return [i.value for i in items]
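
# Illustrative example (not from the source): with the configuration
#
#   [ui]
#   clonebundleprefers = COMPRESSION=gzip, VERSION=v2
#
# entries whose COMPRESSION attribute is 'gzip' sort first; ties are then
# broken by VERSION=v2, and entries that remain indistinguishable keep their
# original manifest order.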
2157
2170
2158 def trypullbundlefromurl(ui, repo, url):
2171 def trypullbundlefromurl(ui, repo, url):
2159 """Attempt to apply a bundle from a URL."""
2172 """Attempt to apply a bundle from a URL."""
2160 with repo.lock(), repo.transaction('bundleurl') as tr:
2173 with repo.lock(), repo.transaction('bundleurl') as tr:
2161 try:
2174 try:
2162 fh = urlmod.open(ui, url)
2175 fh = urlmod.open(ui, url)
2163 cg = readbundle(ui, fh, 'stream')
2176 cg = readbundle(ui, fh, 'stream')
2164
2177
2165 if isinstance(cg, streamclone.streamcloneapplier):
2178 if isinstance(cg, streamclone.streamcloneapplier):
2166 cg.apply(repo)
2179 cg.apply(repo)
2167 else:
2180 else:
2168 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2181 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2169 return True
2182 return True
2170 except urlerr.httperror as e:
2183 except urlerr.httperror as e:
2171 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2184 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2172 except urlerr.urlerror as e:
2185 except urlerr.urlerror as e:
2173 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2186 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2174
2187
2175 return False
2188 return False
@@ -1,923 +1,925 b''
1 The Mercurial wire protocol is a request-response based protocol
1 The Mercurial wire protocol is a request-response based protocol
2 with multiple wire representations.
2 with multiple wire representations.
3
3
4 Each request is modeled as a command name, a dictionary of arguments, and
4 Each request is modeled as a command name, a dictionary of arguments, and
5 optional raw input. Command arguments and their types are intrinsic
5 optional raw input. Command arguments and their types are intrinsic
6 properties of commands. So is the response type of the command. This means
6 properties of commands. So is the response type of the command. This means
7 clients can't always send arbitrary arguments to servers and servers can't
7 clients can't always send arbitrary arguments to servers and servers can't
8 return multiple response types.
8 return multiple response types.
9
9
10 The protocol is synchronous and does not support multiplexing (concurrent
10 The protocol is synchronous and does not support multiplexing (concurrent
11 commands).
11 commands).
12
12
13 Transport Protocols
13 Transport Protocols
14 ===================
14 ===================
15
15
16 HTTP Transport
16 HTTP Transport
17 --------------
17 --------------
18
18
19 Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are
19 Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are
20 sent to the base URL of the repository with the command name sent in
20 sent to the base URL of the repository with the command name sent in
21 the ``cmd`` query string parameter. e.g.
21 the ``cmd`` query string parameter. e.g.
22 ``https://example.com/repo?cmd=capabilities``. The HTTP method is ``GET``
22 ``https://example.com/repo?cmd=capabilities``. The HTTP method is ``GET``
23 or ``POST`` depending on the command and whether there is a request
23 or ``POST`` depending on the command and whether there is a request
24 body.
24 body.
25
25
26 Command arguments can be sent multiple ways.
26 Command arguments can be sent multiple ways.
27
27
28 The simplest is part of the URL query string using ``x-www-form-urlencoded``
28 The simplest is part of the URL query string using ``x-www-form-urlencoded``
29 encoding (see Python's ``urllib.urlencode()``). However, many servers impose
29 encoding (see Python's ``urllib.urlencode()``). However, many servers impose
30 length limitations on the URL. So this mechanism is typically only used if
30 length limitations on the URL. So this mechanism is typically only used if
31 the server doesn't support other mechanisms.
31 the server doesn't support other mechanisms.
32
32
33 If the server supports the ``httpheader`` capability, command arguments can
33 If the server supports the ``httpheader`` capability, command arguments can
34 be sent in HTTP request headers named ``X-HgArg-<N>`` where ``<N>`` is an
34 be sent in HTTP request headers named ``X-HgArg-<N>`` where ``<N>`` is an
35 integer starting at 1. A ``x-www-form-urlencoded`` representation of the
35 integer starting at 1. A ``x-www-form-urlencoded`` representation of the
36 arguments is obtained. This full string is then split into chunks and sent
36 arguments is obtained. This full string is then split into chunks and sent
37 in numbered ``X-HgArg-<N>`` headers. The maximum length of each HTTP header
37 in numbered ``X-HgArg-<N>`` headers. The maximum length of each HTTP header
38 is defined by the server in the ``httpheader`` capability value, which defaults
38 is defined by the server in the ``httpheader`` capability value, which defaults
39 to ``1024``. The server reassembles the encoded arguments string by
39 to ``1024``. The server reassembles the encoded arguments string by
40 concatenating the ``X-HgArg-<N>`` headers then URL decodes them into a
40 concatenating the ``X-HgArg-<N>`` headers then URL decodes them into a
41 dictionary.
41 dictionary.
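
As a purely illustrative sketch (not the exact algorithm used by Mercurial), a
client could derive the ``X-HgArg-<N>`` headers from the encoded argument
string like this, using the default ``1024`` limit mentioned above::

    import urllib

    args = {'namespace': 'phases'}
    encoded = urllib.urlencode(sorted(args.items()))
    limit = 1024  # advertised via the httpheader capability
    headers = {}
    for i, offset in enumerate(range(0, len(encoded), limit)):
        headers['X-HgArg-%d' % (i + 1)] = encoded[offset:offset + limit]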
42
42
43 The list of ``X-HgArg-<N>`` headers should be added to the ``Vary`` request
43 The list of ``X-HgArg-<N>`` headers should be added to the ``Vary`` request
44 header to instruct caches to take these headers into consideration when caching
44 header to instruct caches to take these headers into consideration when caching
45 requests.
45 requests.
46
46
47 If the server supports the ``httppostargs`` capability, the client
47 If the server supports the ``httppostargs`` capability, the client
48 may send command arguments in the HTTP request body as part of an
48 may send command arguments in the HTTP request body as part of an
49 HTTP POST request. The command arguments will be URL encoded just like
49 HTTP POST request. The command arguments will be URL encoded just like
50 they would for sending them via HTTP headers. However, no splitting is
50 they would for sending them via HTTP headers. However, no splitting is
51 performed: the raw arguments are included in the HTTP request body.
51 performed: the raw arguments are included in the HTTP request body.
52
52
53 The client sends a ``X-HgArgs-Post`` header with the string length of the
53 The client sends a ``X-HgArgs-Post`` header with the string length of the
54 encoded arguments data. Additional data may be included in the HTTP
54 encoded arguments data. Additional data may be included in the HTTP
55 request body immediately following the argument data. The offset of the
55 request body immediately following the argument data. The offset of the
56 non-argument data is defined by the ``X-HgArgs-Post`` header. The
56 non-argument data is defined by the ``X-HgArgs-Post`` header. The
57 ``X-HgArgs-Post`` header is not required if there is no argument data.
57 ``X-HgArgs-Post`` header is not required if there is no argument data.
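
An illustrative ``httppostargs`` request (the command and values are invented
for the example; the body is exactly 16 bytes of argument data and carries no
additional command data)::

    POST /repo?cmd=listkeys HTTP/1.1
    Content-Length: 16
    X-HgArgs-Post: 16

    namespace=phases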
58
58
59 Additional command data can be sent as part of the HTTP request body. The
59 Additional command data can be sent as part of the HTTP request body. The
60 default ``Content-Type`` when sending data is ``application/mercurial-0.1``.
60 default ``Content-Type`` when sending data is ``application/mercurial-0.1``.
61 A ``Content-Length`` header is currently always sent.
61 A ``Content-Length`` header is currently always sent.
62
62
63 Example HTTP requests::
63 Example HTTP requests::
64
64
65 GET /repo?cmd=capabilities
65 GET /repo?cmd=capabilities
66 X-HgArg-1: foo=bar&baz=hello%20world
66 X-HgArg-1: foo=bar&baz=hello%20world
67
67
68 The request media type should be chosen based on server support. If the
68 The request media type should be chosen based on server support. If the
69 ``httpmediatype`` server capability is present, the client should send
69 ``httpmediatype`` server capability is present, the client should send
70 the newest mutually supported media type. If this capability is absent,
70 the newest mutually supported media type. If this capability is absent,
71 the client must assume the server only supports the
71 the client must assume the server only supports the
72 ``application/mercurial-0.1`` media type.
72 ``application/mercurial-0.1`` media type.
73
73
74 The ``Content-Type`` HTTP response header identifies the response as coming
74 The ``Content-Type`` HTTP response header identifies the response as coming
75 from Mercurial and can also be used to signal an error has occurred.
75 from Mercurial and can also be used to signal an error has occurred.
76
76
77 The ``application/mercurial-*`` media types indicate a generic Mercurial
77 The ``application/mercurial-*`` media types indicate a generic Mercurial
78 data type.
78 data type.
79
79
80 The ``application/mercurial-0.1`` media type is raw Mercurial data. It is the
80 The ``application/mercurial-0.1`` media type is raw Mercurial data. It is the
81 predecessor of the format below.
81 predecessor of the format below.
82
82
83 The ``application/mercurial-0.2`` media type is compression framed Mercurial
83 The ``application/mercurial-0.2`` media type is compression framed Mercurial
84 data. The first byte of the payload indicates the length of the compression
84 data. The first byte of the payload indicates the length of the compression
85 format identifier that follows. Next are N bytes indicating the compression
85 format identifier that follows. Next are N bytes indicating the compression
86 format. e.g. ``zlib``. The remaining bytes are compressed according to that
86 format. e.g. ``zlib``. The remaining bytes are compressed according to that
87 compression format. The decompressed data behaves the same as with
87 compression format. The decompressed data behaves the same as with
88 ``application/mercurial-0.1``.
88 ``application/mercurial-0.1``.
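
A minimal sketch of reading this framing (illustrative only)::

    def readframed(fh):
        # one byte: length of the compression format identifier
        namelen = ord(fh.read(1))
        # that many bytes: the compression format name, e.g. 'zlib'
        compformat = fh.read(namelen)
        # everything that follows is compressed with that format
        compressed = fh.read()
        return compformat, compressed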
89
89
90 The ``application/hg-error`` media type indicates a generic error occurred.
90 The ``application/hg-error`` media type indicates a generic error occurred.
91 The content of the HTTP response body typically holds text describing the
91 The content of the HTTP response body typically holds text describing the
92 error.
92 error.
93
93
94 The ``application/hg-changegroup`` media type indicates a changegroup response
94 The ``application/hg-changegroup`` media type indicates a changegroup response
95 type.
95 type.
96
96
97 Clients also accept the ``text/plain`` media type. All other media
97 Clients also accept the ``text/plain`` media type. All other media
98 types should cause the client to error.
98 types should cause the client to error.
99
99
100 Behavior of media types is further described in the ``Content Negotiation``
100 Behavior of media types is further described in the ``Content Negotiation``
101 section below.
101 section below.
102
102
103 Clients should issue a ``User-Agent`` request header that identifies the client.
103 Clients should issue a ``User-Agent`` request header that identifies the client.
104 The server should not use the ``User-Agent`` for feature detection.
104 The server should not use the ``User-Agent`` for feature detection.
105
105
106 A command returning a ``string`` response issues an
106 A command returning a ``string`` response issues an
107 ``application/mercurial-0.*`` media type and the HTTP response body contains
107 ``application/mercurial-0.*`` media type and the HTTP response body contains
108 the raw string value (after compression decoding, if used). A
108 the raw string value (after compression decoding, if used). A
109 ``Content-Length`` header is typically issued, but not required.
109 ``Content-Length`` header is typically issued, but not required.
110
110
111 A command returning a ``stream`` response issues an
111 A command returning a ``stream`` response issues an
112 ``application/mercurial-0.*`` media type and the HTTP response is typically
112 ``application/mercurial-0.*`` media type and the HTTP response is typically
113 using *chunked transfer* (``Transfer-Encoding: chunked``).
113 using *chunked transfer* (``Transfer-Encoding: chunked``).
114
114
115 SSH Transport
115 SSH Transport
116 =============
116 =============
117
117
118 The SSH transport is a custom text-based protocol suitable for use over any
118 The SSH transport is a custom text-based protocol suitable for use over any
119 bi-directional stream transport. It is most commonly used with SSH.
119 bi-directional stream transport. It is most commonly used with SSH.
120
120
121 An SSH transport server can be started with ``hg serve --stdio``. The stdin,
121 An SSH transport server can be started with ``hg serve --stdio``. The stdin,
122 stderr, and stdout file descriptors of the started process are used to exchange
122 stderr, and stdout file descriptors of the started process are used to exchange
123 data. When Mercurial connects to a remote server over SSH, it actually starts
123 data. When Mercurial connects to a remote server over SSH, it actually starts
124 a ``hg serve --stdio`` process on the remote server.
124 a ``hg serve --stdio`` process on the remote server.
125
125
126 Commands are issued by sending the command name followed by a trailing newline
126 Commands are issued by sending the command name followed by a trailing newline
127 ``\n`` to the server. e.g. ``capabilities\n``.
127 ``\n`` to the server. e.g. ``capabilities\n``.
128
128
129 Command arguments are sent in the following format::
129 Command arguments are sent in the following format::
130
130
131 <argument> <length>\n<value>
131 <argument> <length>\n<value>
132
132
133 That is, the argument string name followed by a space followed by the
133 That is, the argument string name followed by a space followed by the
134 integer length of the value (expressed as a string) followed by a newline
134 integer length of the value (expressed as a string) followed by a newline
135 (``\n``) followed by the raw argument value.
135 (``\n``) followed by the raw argument value.
136
136
137 Dictionary arguments are encoded differently::
137 Dictionary arguments are encoded differently::
138
138
139 <argument> <# elements>\n
139 <argument> <# elements>\n
140 <key1> <length1>\n<value1>
140 <key1> <length1>\n<value1>
141 <key2> <length2>\n<value2>
141 <key2> <length2>\n<value2>
142 ...
142 ...
143
143
144 Non-argument data is sent immediately after the final argument value. It is
144 Non-argument data is sent immediately after the final argument value. It is
145 encoded in chunks::
145 encoded in chunks::
146
146
147 <length>\n<data>
147 <length>\n<data>
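
Putting the above together, a client-side sketch of issuing a command over
this transport could look like the following (the SSH command line, host and
repository path are invented for the example)::

    import subprocess

    proc = subprocess.Popen(
        ['ssh', 'user@example.com', 'hg', '-R', 'repo', 'serve', '--stdio'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def sendcommand(name, args):
        # command name, then one '<argument> <length>\n<value>' per argument
        proc.stdin.write(name + '\n')
        for arg, value in sorted(args.items()):
            proc.stdin.write('%s %d\n' % (arg, len(value)))
            proc.stdin.write(value)
        proc.stdin.flush()

    sendcommand('listkeys', {'namespace': 'bookmarks'})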
148
148
149 Each command declares a list of supported arguments and their types. If a
149 Each command declares a list of supported arguments and their types. If a
150 client sends an unknown argument to the server, the server should abort
150 client sends an unknown argument to the server, the server should abort
151 immediately. The special argument ``*`` in a command's definition indicates
151 immediately. The special argument ``*`` in a command's definition indicates
152 that all argument names are allowed.
152 that all argument names are allowed.
153
153
154 The definition of supported arguments and types is initially made when a
154 The definition of supported arguments and types is initially made when a
155 new command is implemented. The client and server must initially independently
155 new command is implemented. The client and server must initially independently
156 agree on the arguments and their types. This initial set of arguments can be
156 agree on the arguments and their types. This initial set of arguments can be
157 supplemented through the presence of *capabilities* advertised by the server.
157 supplemented through the presence of *capabilities* advertised by the server.
158
158
159 Each command has a defined expected response type.
159 Each command has a defined expected response type.
160
160
161 A ``string`` response type is a length framed value. The response consists of
161 A ``string`` response type is a length framed value. The response consists of
162 the string encoded integer length of a value followed by a newline (``\n``)
162 the string encoded integer length of a value followed by a newline (``\n``)
163 followed by the value. Empty values are allowed (and are represented as
163 followed by the value. Empty values are allowed (and are represented as
164 ``0\n``).
164 ``0\n``).
165
165
166 A ``stream`` response type consists of raw bytes of data. There is no framing.
166 A ``stream`` response type consists of raw bytes of data. There is no framing.
167
167
168 A generic error response type is also supported. It consists of an error
168 A generic error response type is also supported. It consists of an error
169 message written to ``stderr`` followed by ``\n-\n``. In addition, ``\n`` is
169 message written to ``stderr`` followed by ``\n-\n``. In addition, ``\n`` is
170 written to ``stdout``.
170 written to ``stdout``.
171
171
172 If the server receives an unknown command, it will send an empty ``string``
172 If the server receives an unknown command, it will send an empty ``string``
173 response.
173 response.
174
174
175 The server terminates if it receives an empty command (a ``\n`` character).
175 The server terminates if it receives an empty command (a ``\n`` character).

Capabilities
============

Servers advertise supported wire protocol features. This allows clients to
probe for server features before blindly calling a command or passing a
specific argument.

The server's features are exposed via a *capabilities* string. This is a
space-delimited string of tokens/features. Some features are single words
like ``lookup`` or ``batch``. Others are complicated key-value pairs
advertising sub-features. e.g. ``httpheader=2048``. When complex, non-word
values are used, each feature name can define its own encoding of sub-values.
Comma-delimited and ``x-www-form-urlencoded`` values are common.
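
For illustration only, the string could be split into a feature map along
these lines (the helper name is hypothetical; Mercurial's client code has its
own parser)::

  def parsecapabilities(caps):
      # caps is the raw space-delimited capabilities string, e.g.
      # b'lookup branchmap httpheader=2048 compression=zstd,zlib'
      features = {}
      for token in caps.split(b' '):
          if b'=' in token:
              name, value = token.split(b'=', 1)
              features[name] = value
          else:
              features[token] = None
      return features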

The following sections document the capabilities defined by the canonical
Mercurial server implementation.

batch
-----

Whether the server supports the ``batch`` command.

This capability/command was introduced in Mercurial 1.9 (released July 2011).

branchmap
---------

Whether the server supports the ``branchmap`` command.

This capability/command was introduced in Mercurial 1.3 (released July 2009).

bundle2-exp
-----------

Precursor to ``bundle2`` capability that was used before bundle2 was a
stable feature.

This capability was introduced in Mercurial 3.0 behind an experimental
flag. This capability should not be observed in the wild.

bundle2
-------

Indicates whether the server supports the ``bundle2`` data exchange format.

The value of the capability is a URL quoted, newline (``\n``) delimited
list of keys or key-value pairs.

A key is simply a URL encoded string.

A key-value pair is a URL encoded key separated from a URL encoded value by
an ``=``. If the value is a list, elements are delimited by a ``,`` after
URL encoding.

For example, say we have the values::

  {'HG20': [], 'changegroup': ['01', '02'], 'digests': ['sha1', 'sha512']}

We would first construct a string::

  HG20\nchangegroup=01,02\ndigests=sha1,sha512

We would then URL quote this string::

  HG20%0Achangegroup%3D01%2C02%0Adigests%3Dsha1%2Csha512

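A sketch of producing that encoding in Python (illustrative only; Mercurial
uses its own helpers for this)::

  import urllib.parse

  caps = {b'HG20': [], b'changegroup': [b'01', b'02'],
          b'digests': [b'sha1', b'sha512']}
  lines = []
  for key in sorted(caps):
      values = caps[key]
      if values:
          lines.append(key + b'=' + b','.join(values))
      else:
          lines.append(key)
  # -> HG20%0Achangegroup%3D01%2C02%0Adigests%3Dsha1%2Csha512
  encoded = urllib.parse.quote_from_bytes(b'\n'.join(lines))
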
This capability was introduced in Mercurial 3.4 (released May 2015).

changegroupsubset
-----------------

Whether the server supports the ``changegroupsubset`` command.

This capability was introduced in Mercurial 0.9.2 (released December
2006).

This capability was introduced at the same time as the ``lookup``
capability/command.

compression
-----------

Declares support for negotiating compression formats.

Presence of this capability indicates the server supports dynamic selection
of compression formats based on the client request.

Servers advertising this capability are required to support the
``application/mercurial-0.2`` media type in response to commands returning
streams. Servers may support this media type on any command.

The value of the capability is a comma-delimited list of strings declaring
supported compression formats. The order of the compression formats is in
server-preferred order, most preferred first.

The identifiers used by the official Mercurial distribution are:

bzip2
  bzip2
none
  uncompressed / raw data
zlib
  zlib (no gzip header)
zstd
  zstd

This capability was introduced in Mercurial 4.1 (released February 2017).

getbundle
---------

Whether the server supports the ``getbundle`` command.

This capability was introduced in Mercurial 1.9 (released July 2011).

httpheader
----------

Whether the server supports receiving command arguments via HTTP request
headers.

The value of the capability is an integer describing the max header
length that clients should send. Clients should ignore any content after a
comma in the value, as this is reserved for future use.

This capability was introduced in Mercurial 1.9 (released July 2011).

httpmediatype
-------------

Indicates which HTTP media types (``Content-Type`` header) the server is
capable of receiving and sending.

The value of the capability is a comma-delimited list of strings identifying
support for media type and transmission direction. The following strings may
be present:

0.1rx
  Indicates server support for receiving ``application/mercurial-0.1`` media
  types.

0.1tx
  Indicates server support for sending ``application/mercurial-0.1`` media
  types.

0.2rx
  Indicates server support for receiving ``application/mercurial-0.2`` media
  types.

0.2tx
  Indicates server support for sending ``application/mercurial-0.2`` media
  types.

minrx=X
  Minimum media type version the server is capable of receiving. Value is a
  string like ``0.2``.

  This capability can be used by servers to limit connections from legacy
  clients not using the latest supported media type. However, only clients
  with knowledge of this capability will know to consult this value. This
  capability is present so the client may issue a more user-friendly error
  when the server has locked out a legacy client.

mintx=X
  Minimum media type version the server is capable of sending. Value is a
  string like ``0.1``.

Servers advertising support for the ``application/mercurial-0.2`` media type
should also advertise the ``compression`` capability.

This capability was introduced in Mercurial 4.1 (released February 2017).

httppostargs
------------

**Experimental**

Indicates that the server supports, and prefers, that clients send command
arguments via an HTTP POST request as part of the request body.

This capability was introduced in Mercurial 3.8 (released May 2016).

known
-----

Whether the server supports the ``known`` command.

This capability/command was introduced in Mercurial 1.9 (released July 2011).

lookup
------

Whether the server supports the ``lookup`` command.

This capability was introduced in Mercurial 0.9.2 (released December
2006).

This capability was introduced at the same time as the ``changegroupsubset``
capability/command.

pushkey
-------

Whether the server supports the ``pushkey`` and ``listkeys`` commands.

This capability was introduced in Mercurial 1.6 (released July 2010).

standardbundle
--------------

**Unsupported**

This capability was introduced during the Mercurial 0.9.2 development cycle in
2006. It was never present in a release, as it was replaced by the ``unbundle``
capability. This capability should not be encountered in the wild.

stream-preferred
----------------

If present, the server prefers that clients clone using the streaming clone
protocol (``hg clone --stream``) rather than the standard
changegroup/bundle based protocol.

This capability was introduced in Mercurial 2.2 (released May 2012).

streamreqs
----------

Indicates whether the server supports *streaming clones* and the *requirements*
that clients must support to receive it.

If present, the server supports the ``stream_out`` command, which transmits
raw revlogs from the repository instead of changegroups. This provides a faster
cloning mechanism at the expense of more bandwidth used.

The value of this capability is a comma-delimited list of repo format
*requirements*. These are requirements that impact the reading of data in
the ``.hg/store`` directory. An example value is
``streamreqs=generaldelta,revlogv1`` indicating the server repo requires
the ``revlogv1`` and ``generaldelta`` requirements.

If the only format requirement is ``revlogv1``, the server may expose the
``stream`` capability instead of the ``streamreqs`` capability.

This capability was introduced in Mercurial 1.7 (released November 2010).

stream
------

Whether the server supports *streaming clones* from ``revlogv1`` repos.

If present, the server supports the ``stream_out`` command, which transmits
raw revlogs from the repository instead of changegroups. This provides a faster
cloning mechanism at the expense of more bandwidth used.

This capability was introduced in Mercurial 0.9.1 (released July 2006).

When initially introduced, the value of the capability was the numeric
revlog revision. e.g. ``stream=1``. This indicates the changegroup is using
``revlogv1``. This simple integer value wasn't powerful enough, so the
``streamreqs`` capability was invented to handle cases where the repo
requirements have more than just ``revlogv1``. Newer servers omit the
``=1`` since it was the only value supported and the value of ``1`` can
be implied by clients.

unbundlehash
------------

Whether the ``unbundle`` command supports receiving a hash of all the
heads instead of a list.

For more, see the documentation for the ``unbundle`` command.

This capability was introduced in Mercurial 1.9 (released July 2011).

unbundle
--------

Whether the server supports pushing via the ``unbundle`` command.

This capability/command has been present since Mercurial 0.9.1 (released
July 2006).

Mercurial 0.9.2 (released December 2006) added values to the capability
indicating which bundle types the server supports receiving. This value is a
comma-delimited list. e.g. ``HG10GZ,HG10BZ,HG10UN``. The order of values
reflects the priority/preference of that type, where the first value is the
most preferred type.

Handshake Protocol
==================

While not explicitly required, it is common for clients to perform a
*handshake* when connecting to a server. The handshake accomplishes 2 things:

* Obtaining capabilities and other server features
* Flushing extra server output (e.g. SSH servers may print extra text
  when connecting that may confuse the wire protocol)

This isn't a traditional *handshake* as far as network protocols go because
there is no persistent state as a result of the handshake: the handshake is
simply the issuing of commands and commands are stateless.

The canonical clients perform a capabilities lookup at connection establishment
time. This is because clients must assume a server only supports the features
of the original Mercurial server implementation until proven otherwise (from
advertised capabilities). Nearly every server running today supports features
that weren't present in the original Mercurial server implementation. Rather
than waiting until a capability-dependent operation is attempted, the client
issues the lookup at connection start to avoid any delay later.

For HTTP servers, the client sends a ``capabilities`` command request as
soon as the connection is established. The server responds with a capabilities
string, which the client parses.

For SSH servers, the client sends the ``hello`` command (no arguments)
and a ``between`` command with the ``pairs`` argument having the value
``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.

The ``between`` command has been supported since the original Mercurial
server. Requesting the empty range will return a ``\n`` string response,
which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
followed by the value, which happens to be a newline).

The ``hello`` command was later introduced. Servers supporting it will issue
a response to that command before sending the ``1\n\n`` response to the
``between`` command. Servers not supporting ``hello`` will send an empty
response (``0\n``).
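
As a sketch, the bytes a canonical client writes at connection time might be
produced like this (``stdin`` stands for a hypothetical writable handle on the
server process, and the ``<name> <byte length>\n<value>`` argument framing is
the one described for the SSH transport earlier in this document)::

  nullpair = b'-'.join([b'0' * 40] * 2)   # the all-zeros pair, 81 bytes
  stdin.write(b'hello\n')
  stdin.write(b'between\n')
  stdin.write(b'pairs %d\n' % len(nullpair))
  stdin.write(nullpair)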

In addition to the expected output from the ``hello`` and ``between`` commands,
servers may also send other output, such as *message of the day (MOTD)*
announcements. Clients assume servers will send this output before the
Mercurial server replies to the client-issued commands. So any server output
not conforming to the expected command responses is assumed to be not related
to Mercurial and can be ignored.

Content Negotiation
===================

The wire protocol has some mechanisms to help peers determine what content
types and encoding the other side will accept. Historically, these mechanisms
have been built into commands themselves because most commands only send a
well-defined response type and only certain commands needed to support
functionality like compression.

Currently, only the HTTP transport supports content negotiation at the protocol
layer.

HTTP requests advertise supported response formats via the ``X-HgProto-<N>``
request header, where ``<N>`` is an integer starting at 1 allowing the logical
value to span multiple headers. This value consists of a list of
space-delimited parameters. Each parameter denotes a feature or capability.

The following parameters are defined:

0.1
  Indicates the client supports receiving ``application/mercurial-0.1``
  responses.

0.2
  Indicates the client supports receiving ``application/mercurial-0.2``
  responses.

comp
  Indicates compression formats the client can decode. Value is a list of
  comma delimited strings identifying compression formats ordered from
  most preferential to least preferential. e.g. ``comp=zstd,zlib,none``.

  This parameter does not have an effect if only the ``0.1`` parameter
  is defined, as support for ``application/mercurial-0.2`` or greater is
  required to use arbitrary compression formats.

  If this parameter is not advertised, the server interprets this as
  equivalent to ``zlib,none``.

Clients may choose to only send this header if the ``httpmediatype``
server capability is present, as currently all server-side features
consulting this header require the client to opt in to new protocol features
advertised via the ``httpmediatype`` capability.
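
Put together, a modern client might send a header such as the following
(a sketch; clients may also split the value across ``X-HgProto-1``,
``X-HgProto-2``, ... to respect the server's advertised ``httpheader``
limit)::

  headers = {
      # Advertise both media types plus the compression engines this
      # client can decode, most preferred first.
      'X-HgProto-1': '0.1 0.2 comp=zstd,zlib,none',
  }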

A server that doesn't receive an ``X-HgProto-<N>`` header should infer a
value of ``0.1``. This is compatible with legacy clients.

A server receiving a request indicating support for multiple media type
versions may respond with any of the supported media types. Not all servers
may support all media types on all commands.

Commands
========

This section contains a list of all wire protocol commands implemented by
the canonical Mercurial server.

batch
-----

Issue multiple commands while sending a single command request. The purpose
of this command is to allow a client to issue multiple commands while avoiding
multiple round trips to the server, thereby enabling operations to complete
more quickly.

The command accepts a ``cmds`` argument that contains a list of commands to
execute.

The value of ``cmds`` is a ``;`` delimited list of strings. Each string has the
form ``<command> <arguments>``. That is, the command name followed by a space
followed by an argument string.

The argument string is a ``,`` delimited list of ``<key>=<value>`` values
corresponding to command arguments. Both the argument name and value are
escaped using a special substitution map::

  : -> :c
  , -> :o
  ; -> :s
  = -> :e

The response type for this command is ``string``. The value contains a
``;`` delimited list of responses for each requested command. Each value
in this list is escaped using the same substitution map used for arguments.
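
A sketch of that substitution with hypothetical helper names (``:`` must be
escaped first and unescaped last so the escape sequences themselves survive)::

  _escapes = [(b':', b':c'), (b',', b':o'), (b';', b':s'), (b'=', b':e')]

  def escapebatcharg(plain):
      for ch, esc in _escapes:
          plain = plain.replace(ch, esc)
      return plain

  def unescapebatcharg(escaped):
      for ch, esc in reversed(_escapes):
          escaped = escaped.replace(esc, ch)
      return escaped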

If an error occurs, the generic error response may be sent.

between
-------

(Legacy command used for discovery in old clients)

Obtain nodes between pairs of nodes.

The ``pairs`` argument contains a space-delimited list of ``-`` delimited
hex node pairs. e.g.::

  a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896-6dc58916e7c070f678682bfe404d2e2d68291a18

Return type is a ``string``. Value consists of lines corresponding to each
requested range. Each line contains a space-delimited list of hex nodes.
A newline ``\n`` terminates each line, including the last one.

branchmap
---------

Obtain heads in named branches.

Accepts no arguments. Return type is a ``string``.

Return value contains lines with URL encoded branch names followed by a space
followed by a space-delimited list of hex nodes of heads on that branch.
e.g.::

  default a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896 6dc58916e7c070f678682bfe404d2e2d68291a18
  stable baae3bf31522f41dd5e6d7377d0edd8d1cf3fccc

There is no trailing newline.
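
Decoding such a response might look like this (a sketch; the helper name is
hypothetical)::

  import urllib.parse

  def parsebranchmap(data):
      branches = {}
      for line in data.split(b'\n'):
          name, heads = line.split(b' ', 1)
          branches[urllib.parse.unquote_to_bytes(name)] = heads.split(b' ')
      return branches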

branches
--------

(Legacy command used for discovery in old clients. Clients with ``getbundle``
use the ``known`` and ``heads`` commands instead.)

Obtain ancestor changesets of specific nodes back to a branch point.

Despite the name, this command has nothing to do with Mercurial named branches.
Instead, it is related to DAG branches.

The command accepts a ``nodes`` argument, which is a string of space-delimited
hex nodes.

For each node requested, the server will find the first ancestor node that is
a DAG root or is a merge.

Return type is a ``string``. Return value contains lines with result data for
each requested node. Each line contains space-delimited nodes followed by a
newline (``\n``). The 4 nodes reported on each line correspond to the requested
node, the ancestor node found, and its 2 parent nodes (which may be the null
node).

capabilities
------------

Obtain the capabilities string for the repo.

Unlike the ``hello`` command, the capabilities string is not prefixed.
There is no trailing newline.

This command does not accept any arguments. Return type is a ``string``.

changegroup
-----------

(Legacy command: use ``getbundle`` instead)

Obtain a changegroup version 1 with data for changesets that are
descendants of client-specified changesets.

The ``roots`` argument contains a list of space-delimited hex nodes.

The server responds with a changegroup version 1 containing all
changesets between the requested root/base nodes and the repo's head nodes
at the time of the request.

The return type is a ``stream``.

changegroupsubset
-----------------

(Legacy command: use ``getbundle`` instead)

Obtain a changegroup version 1 with data for changesets between
client-specified base and head nodes.

The ``bases`` argument contains a list of space-delimited hex nodes.
The ``heads`` argument contains a list of space-delimited hex nodes.

The server responds with a changegroup version 1 containing all
changesets between the requested base and head nodes at the time of the
request.

The return type is a ``stream``.

clonebundles
------------

Obtains a manifest of bundle URLs available to seed clones.

Each returned line contains a URL followed by metadata. See the
documentation in the ``clonebundles`` extension for more.

The return type is a ``string``.

getbundle
---------

Obtain a bundle containing repository data.

This command accepts the following arguments:

heads
  List of space-delimited hex nodes of heads to retrieve.
common
  List of space-delimited hex nodes that the client has in common with the
  server.
obsmarkers
  Boolean indicating whether to include obsolescence markers as part
  of the response. Only works with bundle2.
bundlecaps
  Comma-delimited set of strings defining client bundle capabilities.
listkeys
  Comma-delimited list of strings of ``pushkey`` namespaces. For each
  namespace listed, a bundle2 part will be included with the content of
  that namespace.
cg
  Boolean indicating whether changegroup data is requested.
cbattempted
  Boolean indicating whether the client attempted to use the *clone bundles*
  feature before performing this request.
bookmarks
  Boolean indicating whether bookmark data is requested.
phases
  Boolean indicating whether phases data is requested.
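
As a purely conceptual sketch of a pull request's arguments (the variable
names are hypothetical, and the actual wire encoding of node lists and
boolean flags is handled by the transport code)::

  args = {
      'heads': wantedheads,     # nodes the client wants to end up with
      'common': commonnodes,    # nodes both sides are known to have
      'cg': True,               # include changegroup data
      'bookmarks': True,        # include bookmark data
      'listkeys': ['phases'],   # pushkey namespaces to include in the bundle
  }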

The return type on success is a ``stream`` where the value is a bundle.
On the HTTP transport, the response is zlib compressed.

If an error occurs, a generic error response can be sent.

Unless the client sends a false value for the ``cg`` argument, the returned
bundle contains a changegroup with the nodes between the specified ``common``
and ``heads`` nodes. Depending on the command arguments, the type and content
of the returned bundle can vary significantly.

The default behavior is for the server to send a raw changegroup version
``01`` response.

If the ``bundlecaps`` provided by the client contain a value beginning
with ``HG2``, a bundle2 will be returned. The bundle2 data may contain
additional repository data, such as ``pushkey`` namespace values.

heads
-----

Returns a list of space-delimited hex nodes of repository heads followed
by a newline. e.g.
``a9eeb3adc7ddb5006c088e9eda61791c777cbf7c 31f91a3da534dc849f0d6bfc00a395a97cf218a1\n``

This command does not accept any arguments. The return type is a ``string``.

hello
-----

Returns lines describing interesting things about the server in an RFC 822-like
format.

Currently, the only line defines the server capabilities. It has the form::

  capabilities: <value>

See above for more about the capabilities string.

SSH clients typically issue this command as soon as a connection is
established.

This command does not accept any arguments. The return type is a ``string``.

listkeys
--------

List values in a specified ``pushkey`` namespace.

The ``namespace`` argument defines the pushkey namespace to operate on.

The return type is a ``string``. The value is an encoded dictionary of keys.

Key-value pairs are delimited by newlines (``\n``). Within each line, keys and
values are separated by a tab (``\t``). Keys and values are both strings.
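
A sketch of decoding that dictionary (hypothetical helper name)::

  def parselistkeys(data):
      result = {}
      for line in data.split(b'\n'):
          if not line:
              continue
          key, value = line.split(b'\t', 1)
          result[key] = value
      return result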

lookup
------

Try to resolve a value to a known repository revision.

The ``key`` argument is converted from bytes to an
``encoding.localstr`` instance then passed into
``localrepository.__getitem__`` in an attempt to resolve it.

The return type is a ``string``.

Upon successful resolution, returns ``1 <hex node>\n``. On failure,
returns ``0 <error string>\n``. e.g.::

  1 273ce12ad8f155317b2c078ec75a4eba507f1fba\n

  0 unknown revision 'foo'\n
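
Interpreting the response could look like this (a sketch with a hypothetical
helper name)::

  def parselookup(response):
      success, rest = response.rstrip(b'\n').split(b' ', 1)
      if success == b'1':
          return rest                          # 40-character hex node
      raise LookupError(rest.decode('ascii', 'replace'))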

known
-----

Determine whether multiple nodes are known.

The ``nodes`` argument is a list of space-delimited hex nodes to check
for existence.

The return type is ``string``.

Returns a string consisting of ``0``s and ``1``s indicating whether nodes
are known. If the Nth node specified in the ``nodes`` argument is known,
a ``1`` will be returned at byte offset N. If the node isn't known, ``0``
will be present at byte offset N.

There is no trailing newline.
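
A sketch of pairing the response bytes back up with the requested nodes
(hypothetical helper name)::

  def parseknown(nodes, response):
      # Byte offset N holds '1' if the Nth requested node is known.
      return {node: byte == ord('1')
              for node, byte in zip(nodes, response)}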

pushkey
-------

Set a value using the ``pushkey`` protocol.

Accepts arguments ``namespace``, ``key``, ``old``, and ``new``, which
correspond to the pushkey namespace to operate on, the key within that
namespace to change, the old value (which may be empty), and the new value.
All arguments are string types.

The return type is a ``string``. The value depends on the transport protocol.

The SSH transport sends a string encoded integer followed by a newline
(``\n``) which indicates operation result. The server may send additional
output on the ``stderr`` stream that should be displayed to the user.

The HTTP transport sends a string encoded integer followed by a newline
followed by additional server output that should be displayed to the user.
This may include output from hooks, etc.

The integer result varies by namespace. ``0`` means an error has occurred
and there should be additional output to display to the user.

stream_out
----------

Obtain *streaming clone* data.

The return type is either a ``string`` or a ``stream``, depending on
whether the request was fulfilled properly.

A return value of ``1\n`` indicates the server is not configured to serve
this data. A client that sees this response probably did not verify that the
``stream`` capability is set before making the request.

A return value of ``2\n`` indicates the server was unable to lock the
repository to generate data.

All other responses are a ``stream`` of bytes. The first line of this data
contains 2 space-delimited integers corresponding to the path count and
payload size, respectively::

  <path count> <payload size>\n

The ``<payload size>`` is the total size of path data: it does not include
the size of the per-path header lines.

Following that header are ``<path count>`` entries. Each entry consists of a
line with metadata followed by raw revlog data. The line consists of::

  <store path>\0<size>\n

The ``<store path>`` is the encoded store path of the data that follows.
``<size>`` is the amount of data for this store path/revlog that follows the
newline.

There is no trailer to indicate end of data. Instead, the client should stop
reading after ``<path count>`` entries are consumed.
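
A sketch of consuming that stream (``fh`` is a hypothetical file-like object
positioned at the start of the response)::

  def readstreamclone(fh):
      # Header line: "<path count> <payload size>\n"
      pathcount, payloadsize = map(int, fh.readline().split(b' '))
      for _ in range(pathcount):
          # Per-entry header: "<store path>\0<size>\n"
          storepath, size = fh.readline()[:-1].split(b'\0', 1)
          yield storepath, fh.read(int(size))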

unbundle
--------

Send a bundle containing data (usually changegroup data) to the server.

Accepts the argument ``heads``, which is a space-delimited list of hex nodes
corresponding to server repository heads observed by the client. This is used
to detect race conditions and abort push operations before a server performs
too much work or a client transfers too much data.

The request payload consists of a bundle to be applied to the repository,
as if :hg:`unbundle` had been invoked.

In most scenarios, a special ``push response`` type is returned. This type
contains an integer describing the change in heads as a result of the
operation. A value of ``0`` indicates nothing changed. ``1`` means the number
of heads remained the same. Values ``2`` and larger indicate the number of
added heads minus 1. e.g. ``3`` means 2 heads were added. Negative values
indicate the number of fewer heads, also off by 1. e.g. ``-2`` means there
is 1 fewer head.
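
A sketch of turning that integer back into a human-readable head count change
(hypothetical helper name)::

  def describeheadchange(result):
      if result == 0:
          return 'nothing changed'
      if result == 1:
          return 'number of heads unchanged'
      if result > 1:
          return '%d new heads' % (result - 1)
      return '%d fewer heads' % (-result - 1)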

The encoding of the ``push response`` type varies by transport.

For the SSH transport, this type is composed of 2 ``string`` responses: an
empty response (``0\n``) followed by the integer result value. e.g.
``1\n2``. So the full response might be ``0\n1\n2``.

For the HTTP transport, the response is a ``string`` type composed of an
integer result value followed by a newline (``\n``) followed by string
content holding server output that should be displayed on the client (output
from hooks, etc).

In some cases, the server may respond with a ``bundle2`` bundle. In this
case, the response type is ``stream``. For the HTTP transport, the response
is zlib compressed.

The server may also respond with a generic error type, which contains a string
indicating the failure.
@@ -1,1069 +1,1070 b''
1 # wireproto.py - generic wire protocol support functions
1 # wireproto.py - generic wire protocol support functions
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import os
11 import os
12 import tempfile
12 import tempfile
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 bin,
16 bin,
17 hex,
17 hex,
18 nullid,
18 nullid,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 bundle2,
22 bundle2,
23 changegroup as changegroupmod,
23 changegroup as changegroupmod,
24 discovery,
24 discovery,
25 encoding,
25 encoding,
26 error,
26 error,
27 exchange,
27 exchange,
28 peer,
28 peer,
29 pushkey as pushkeymod,
29 pushkey as pushkeymod,
30 pycompat,
30 pycompat,
31 repository,
31 repository,
32 streamclone,
32 streamclone,
33 util,
33 util,
34 )
34 )
35
35
36 urlerr = util.urlerr
36 urlerr = util.urlerr
37 urlreq = util.urlreq
37 urlreq = util.urlreq
38
38
39 bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
39 bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
40 bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
40 bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
41 'IncompatibleClient')
41 'IncompatibleClient')
42 bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
42 bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
43
43
44 class abstractserverproto(object):
44 class abstractserverproto(object):
45 """abstract class that summarizes the protocol API
45 """abstract class that summarizes the protocol API
46
46
47 Used as reference and documentation.
47 Used as reference and documentation.
48 """
48 """
49
49
50 def getargs(self, args):
50 def getargs(self, args):
51 """return the value for arguments in <args>
51 """return the value for arguments in <args>
52
52
53 returns a list of values (same order as <args>)"""
53 returns a list of values (same order as <args>)"""
54 raise NotImplementedError()
54 raise NotImplementedError()
55
55
56 def getfile(self, fp):
56 def getfile(self, fp):
57 """write the whole content of a file into a file like object
57 """write the whole content of a file into a file like object
58
58
59 The file is in the form::
59 The file is in the form::
60
60
61 (<chunk-size>\n<chunk>)+0\n
61 (<chunk-size>\n<chunk>)+0\n
62
62
63 chunk size is the ascii version of the int.
63 chunk size is the ascii version of the int.
64 """
64 """
65 raise NotImplementedError()
65 raise NotImplementedError()
66
66
67 def redirect(self):
67 def redirect(self):
68 """may setup interception for stdout and stderr
68 """may setup interception for stdout and stderr
69
69
70 See also the `restore` method."""
70 See also the `restore` method."""
71 raise NotImplementedError()
71 raise NotImplementedError()
72
72
73 # If the `redirect` function does install interception, the `restore`
73 # If the `redirect` function does install interception, the `restore`
74 # function MUST be defined. If interception is not used, this function
74 # function MUST be defined. If interception is not used, this function
75 # MUST NOT be defined.
75 # MUST NOT be defined.
76 #
76 #
77 # left commented here on purpose
77 # left commented here on purpose
78 #
78 #
79 #def restore(self):
79 #def restore(self):
80 # """reinstall previous stdout and stderr and return intercepted stdout
80 # """reinstall previous stdout and stderr and return intercepted stdout
81 # """
81 # """
82 # raise NotImplementedError()
82 # raise NotImplementedError()
83
83
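A minimal sketch of a reader for the ``(<chunk-size>\n<chunk>)+0\n`` framing documented in ``getfile`` above; ``iterfilechunks`` is a hypothetical name, not part of this API:

    def iterfilechunks(fp):
        # each chunk is preceded by its size in ASCII; a size of 0 ends the file
        while True:
            size = int(fp.readline())
            if not size:
                break
            yield fp.read(size)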
84 class remoteiterbatcher(peer.iterbatcher):
84 class remoteiterbatcher(peer.iterbatcher):
85 def __init__(self, remote):
85 def __init__(self, remote):
86 super(remoteiterbatcher, self).__init__()
86 super(remoteiterbatcher, self).__init__()
87 self._remote = remote
87 self._remote = remote
88
88
89 def __getattr__(self, name):
89 def __getattr__(self, name):
90 # Validate this method is batchable, since submit() only supports
90 # Validate this method is batchable, since submit() only supports
91 # batchable methods.
91 # batchable methods.
92 fn = getattr(self._remote, name)
92 fn = getattr(self._remote, name)
93 if not getattr(fn, 'batchable', None):
93 if not getattr(fn, 'batchable', None):
94 raise error.ProgrammingError('Attempted to batch a non-batchable '
94 raise error.ProgrammingError('Attempted to batch a non-batchable '
95 'call to %r' % name)
95 'call to %r' % name)
96
96
97 return super(remoteiterbatcher, self).__getattr__(name)
97 return super(remoteiterbatcher, self).__getattr__(name)
98
98
99 def submit(self):
99 def submit(self):
100 """Break the batch request into many patch calls and pipeline them.
100 """Break the batch request into many patch calls and pipeline them.
101
101
102 This is mostly valuable over http where request sizes can be
102 This is mostly valuable over http where request sizes can be
103 limited, but can be used in other places as well.
103 limited, but can be used in other places as well.
104 """
104 """
105 # 2-tuple of (command, arguments) that represents what will be
105 # 2-tuple of (command, arguments) that represents what will be
106 # sent over the wire.
106 # sent over the wire.
107 requests = []
107 requests = []
108
108
109 # 4-tuple of (command, final future, @batchable generator, remote
109 # 4-tuple of (command, final future, @batchable generator, remote
110 # future).
110 # future).
111 results = []
111 results = []
112
112
113 for command, args, opts, finalfuture in self.calls:
113 for command, args, opts, finalfuture in self.calls:
114 mtd = getattr(self._remote, command)
114 mtd = getattr(self._remote, command)
115 batchable = mtd.batchable(mtd.__self__, *args, **opts)
115 batchable = mtd.batchable(mtd.__self__, *args, **opts)
116
116
117 commandargs, fremote = next(batchable)
117 commandargs, fremote = next(batchable)
118 assert fremote
118 assert fremote
119 requests.append((command, commandargs))
119 requests.append((command, commandargs))
120 results.append((command, finalfuture, batchable, fremote))
120 results.append((command, finalfuture, batchable, fremote))
121
121
122 if requests:
122 if requests:
123 self._resultiter = self._remote._submitbatch(requests)
123 self._resultiter = self._remote._submitbatch(requests)
124
124
125 self._results = results
125 self._results = results
126
126
127 def results(self):
127 def results(self):
128 for command, finalfuture, batchable, remotefuture in self._results:
128 for command, finalfuture, batchable, remotefuture in self._results:
129 # Get the raw result, set it in the remote future, feed it
129 # Get the raw result, set it in the remote future, feed it
130 # back into the @batchable generator so it can be decoded, and
130 # back into the @batchable generator so it can be decoded, and
131 # set the result on the final future to this value.
131 # set the result on the final future to this value.
132 remoteresult = next(self._resultiter)
132 remoteresult = next(self._resultiter)
133 remotefuture.set(remoteresult)
133 remotefuture.set(remoteresult)
134 finalfuture.set(next(batchable))
134 finalfuture.set(next(batchable))
135
135
136 # Verify our @batchable generators only emit 2 values.
136 # Verify our @batchable generators only emit 2 values.
137 try:
137 try:
138 next(batchable)
138 next(batchable)
139 except StopIteration:
139 except StopIteration:
140 pass
140 pass
141 else:
141 else:
142 raise error.ProgrammingError('%s @batchable generator emitted '
142 raise error.ProgrammingError('%s @batchable generator emitted '
143 'unexpected value count' % command)
143 'unexpected value count' % command)
144
144
145 yield finalfuture.value
145 yield finalfuture.value
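For context, a caller pipelines commands through this batcher roughly as follows (``remote`` stands in for any wirepeer instance and ``somenode`` for a 20-byte binary node id):

    batch = remote.iterbatch()
    batch.heads()                 # queued, not sent yet
    batch.known([somenode])       # queued as part of the same batch
    batch.submit()                # one 'batch' command goes over the wire
    headsreply, knownreply = batch.results()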
146
146
147 # Forward a couple of names from peer to make wireproto interactions
147 # Forward a couple of names from peer to make wireproto interactions
148 # slightly more sensible.
148 # slightly more sensible.
149 batchable = peer.batchable
149 batchable = peer.batchable
150 future = peer.future
150 future = peer.future
151
151
152 # list of nodes encoding / decoding
152 # list of nodes encoding / decoding
153
153
154 def decodelist(l, sep=' '):
154 def decodelist(l, sep=' '):
155 if l:
155 if l:
156 return [bin(v) for v in l.split(sep)]
156 return [bin(v) for v in l.split(sep)]
157 return []
157 return []
158
158
159 def encodelist(l, sep=' '):
159 def encodelist(l, sep=' '):
160 try:
160 try:
161 return sep.join(map(hex, l))
161 return sep.join(map(hex, l))
162 except TypeError:
162 except TypeError:
163 raise
163 raise
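A quick round-trip illustration with placeholder node ids (Python 2 byte strings, matching the rest of this module):

    nodes = ['\x00' * 20, '\xff' * 20]   # placeholder 20-byte binary nodes
    wire = encodelist(nodes)             # two 40-character hex hashes joined by a space
    assert decodelist(wire) == nodes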
164
164
165 # batched call argument encoding
165 # batched call argument encoding
166
166
167 def escapearg(plain):
167 def escapearg(plain):
168 return (plain
168 return (plain
169 .replace(':', ':c')
169 .replace(':', ':c')
170 .replace(',', ':o')
170 .replace(',', ':o')
171 .replace(';', ':s')
171 .replace(';', ':s')
172 .replace('=', ':e'))
172 .replace('=', ':e'))
173
173
174 def unescapearg(escaped):
174 def unescapearg(escaped):
175 return (escaped
175 return (escaped
176 .replace(':e', '=')
176 .replace(':e', '=')
177 .replace(':s', ';')
177 .replace(':s', ';')
178 .replace(':o', ',')
178 .replace(':o', ',')
179 .replace(':c', ':'))
179 .replace(':c', ':'))
180
180
181 def encodebatchcmds(req):
181 def encodebatchcmds(req):
182 """Return a ``cmds`` argument value for the ``batch`` command."""
182 """Return a ``cmds`` argument value for the ``batch`` command."""
183 cmds = []
183 cmds = []
184 for op, argsdict in req:
184 for op, argsdict in req:
185 # Old servers didn't properly unescape argument names. So prevent
185 # Old servers didn't properly unescape argument names. So prevent
186 # the sending of argument names that may not be decoded properly by
186 # the sending of argument names that may not be decoded properly by
187 # servers.
187 # servers.
188 assert all(escapearg(k) == k for k in argsdict)
188 assert all(escapearg(k) == k for k in argsdict)
189
189
190 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
190 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
191 for k, v in argsdict.iteritems())
191 for k, v in argsdict.iteritems())
192 cmds.append('%s %s' % (op, args))
192 cmds.append('%s %s' % (op, args))
193
193
194 return ';'.join(cmds)
194 return ';'.join(cmds)
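For example, batching a ``heads`` call with a ``known`` call produces a single ``cmds`` string (the node list here is a stand-in value):

    req = [('heads', {}), ('known', {'nodes': 'aaaa bbbb'})]
    encodebatchcmds(req)
    # -> 'heads ;known nodes=aaaa bbbb'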
195
195
196 # mapping of options accepted by getbundle and their types
196 # mapping of options accepted by getbundle and their types
197 #
197 #
198 # Meant to be extended by extensions. It is the extensions' responsibility to ensure
198 # Meant to be extended by extensions. It is the extensions' responsibility to ensure
199 # such options are properly processed in exchange.getbundle.
199 # such options are properly processed in exchange.getbundle.
200 #
200 #
201 # supported types are:
201 # supported types are:
202 #
202 #
203 # :nodes: list of binary nodes
203 # :nodes: list of binary nodes
204 # :csv: list of comma-separated values
204 # :csv: list of comma-separated values
205 # :scsv: list of comma-separated values, returned as a set
205 # :scsv: list of comma-separated values, returned as a set
206 # :plain: string with no transformation needed.
206 # :plain: string with no transformation needed.
207 gboptsmap = {'heads': 'nodes',
207 gboptsmap = {'heads': 'nodes',
208 'bookmarks': 'boolean',
208 'common': 'nodes',
209 'common': 'nodes',
209 'obsmarkers': 'boolean',
210 'obsmarkers': 'boolean',
210 'phases': 'boolean',
211 'phases': 'boolean',
211 'bundlecaps': 'scsv',
212 'bundlecaps': 'scsv',
212 'listkeys': 'csv',
213 'listkeys': 'csv',
213 'cg': 'boolean',
214 'cg': 'boolean',
214 'cbattempted': 'boolean'}
215 'cbattempted': 'boolean'}
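The new ``bookmarks`` entry uses the same ``boolean`` convention as ``phases`` and ``obsmarkers``; a minimal sketch of the round trip (values picked for illustration):

    # client side (wirepeer.getbundle below): booleans are sent as '0'/'1'
    wirevalue = '%i' % bool(True)            # 'bookmarks=1' on the wire
    # server side (getbundle command at the end of this file): '0' must be
    # special-cased because bool('0') would otherwise be True
    received = '0'
    decoded = False if received == '0' else bool(received)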
215
216
216 # client side
217 # client side
217
218
218 class wirepeer(repository.legacypeer):
219 class wirepeer(repository.legacypeer):
219 """Client-side interface for communicating with a peer repository.
220 """Client-side interface for communicating with a peer repository.
220
221
221 Methods commonly call wire protocol commands of the same name.
222 Methods commonly call wire protocol commands of the same name.
222
223
223 See also httppeer.py and sshpeer.py for protocol-specific
224 See also httppeer.py and sshpeer.py for protocol-specific
224 implementations of this interface.
225 implementations of this interface.
225 """
226 """
226 # Begin of basewirepeer interface.
227 # Begin of basewirepeer interface.
227
228
228 def iterbatch(self):
229 def iterbatch(self):
229 return remoteiterbatcher(self)
230 return remoteiterbatcher(self)
230
231
231 @batchable
232 @batchable
232 def lookup(self, key):
233 def lookup(self, key):
233 self.requirecap('lookup', _('look up remote revision'))
234 self.requirecap('lookup', _('look up remote revision'))
234 f = future()
235 f = future()
235 yield {'key': encoding.fromlocal(key)}, f
236 yield {'key': encoding.fromlocal(key)}, f
236 d = f.value
237 d = f.value
237 success, data = d[:-1].split(" ", 1)
238 success, data = d[:-1].split(" ", 1)
238 if int(success):
239 if int(success):
239 yield bin(data)
240 yield bin(data)
240 else:
241 else:
241 self._abort(error.RepoError(data))
242 self._abort(error.RepoError(data))
242
243
243 @batchable
244 @batchable
244 def heads(self):
245 def heads(self):
245 f = future()
246 f = future()
246 yield {}, f
247 yield {}, f
247 d = f.value
248 d = f.value
248 try:
249 try:
249 yield decodelist(d[:-1])
250 yield decodelist(d[:-1])
250 except ValueError:
251 except ValueError:
251 self._abort(error.ResponseError(_("unexpected response:"), d))
252 self._abort(error.ResponseError(_("unexpected response:"), d))
252
253
253 @batchable
254 @batchable
254 def known(self, nodes):
255 def known(self, nodes):
255 f = future()
256 f = future()
256 yield {'nodes': encodelist(nodes)}, f
257 yield {'nodes': encodelist(nodes)}, f
257 d = f.value
258 d = f.value
258 try:
259 try:
259 yield [bool(int(b)) for b in d]
260 yield [bool(int(b)) for b in d]
260 except ValueError:
261 except ValueError:
261 self._abort(error.ResponseError(_("unexpected response:"), d))
262 self._abort(error.ResponseError(_("unexpected response:"), d))
262
263
263 @batchable
264 @batchable
264 def branchmap(self):
265 def branchmap(self):
265 f = future()
266 f = future()
266 yield {}, f
267 yield {}, f
267 d = f.value
268 d = f.value
268 try:
269 try:
269 branchmap = {}
270 branchmap = {}
270 for branchpart in d.splitlines():
271 for branchpart in d.splitlines():
271 branchname, branchheads = branchpart.split(' ', 1)
272 branchname, branchheads = branchpart.split(' ', 1)
272 branchname = encoding.tolocal(urlreq.unquote(branchname))
273 branchname = encoding.tolocal(urlreq.unquote(branchname))
273 branchheads = decodelist(branchheads)
274 branchheads = decodelist(branchheads)
274 branchmap[branchname] = branchheads
275 branchmap[branchname] = branchheads
275 yield branchmap
276 yield branchmap
276 except TypeError:
277 except TypeError:
277 self._abort(error.ResponseError(_("unexpected response:"), d))
278 self._abort(error.ResponseError(_("unexpected response:"), d))
278
279
279 @batchable
280 @batchable
280 def listkeys(self, namespace):
281 def listkeys(self, namespace):
281 if not self.capable('pushkey'):
282 if not self.capable('pushkey'):
282 yield {}, None
283 yield {}, None
283 f = future()
284 f = future()
284 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
285 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
285 yield {'namespace': encoding.fromlocal(namespace)}, f
286 yield {'namespace': encoding.fromlocal(namespace)}, f
286 d = f.value
287 d = f.value
287 self.ui.debug('received listkey for "%s": %i bytes\n'
288 self.ui.debug('received listkey for "%s": %i bytes\n'
288 % (namespace, len(d)))
289 % (namespace, len(d)))
289 yield pushkeymod.decodekeys(d)
290 yield pushkeymod.decodekeys(d)
290
291
291 @batchable
292 @batchable
292 def pushkey(self, namespace, key, old, new):
293 def pushkey(self, namespace, key, old, new):
293 if not self.capable('pushkey'):
294 if not self.capable('pushkey'):
294 yield False, None
295 yield False, None
295 f = future()
296 f = future()
296 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
297 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
297 yield {'namespace': encoding.fromlocal(namespace),
298 yield {'namespace': encoding.fromlocal(namespace),
298 'key': encoding.fromlocal(key),
299 'key': encoding.fromlocal(key),
299 'old': encoding.fromlocal(old),
300 'old': encoding.fromlocal(old),
300 'new': encoding.fromlocal(new)}, f
301 'new': encoding.fromlocal(new)}, f
301 d = f.value
302 d = f.value
302 d, output = d.split('\n', 1)
303 d, output = d.split('\n', 1)
303 try:
304 try:
304 d = bool(int(d))
305 d = bool(int(d))
305 except ValueError:
306 except ValueError:
306 raise error.ResponseError(
307 raise error.ResponseError(
307 _('push failed (unexpected response):'), d)
308 _('push failed (unexpected response):'), d)
308 for l in output.splitlines(True):
309 for l in output.splitlines(True):
309 self.ui.status(_('remote: '), l)
310 self.ui.status(_('remote: '), l)
310 yield d
311 yield d
311
312
312 def stream_out(self):
313 def stream_out(self):
313 return self._callstream('stream_out')
314 return self._callstream('stream_out')
314
315
315 def getbundle(self, source, **kwargs):
316 def getbundle(self, source, **kwargs):
316 kwargs = pycompat.byteskwargs(kwargs)
317 kwargs = pycompat.byteskwargs(kwargs)
317 self.requirecap('getbundle', _('look up remote changes'))
318 self.requirecap('getbundle', _('look up remote changes'))
318 opts = {}
319 opts = {}
319 bundlecaps = kwargs.get('bundlecaps')
320 bundlecaps = kwargs.get('bundlecaps')
320 if bundlecaps is not None:
321 if bundlecaps is not None:
321 kwargs['bundlecaps'] = sorted(bundlecaps)
322 kwargs['bundlecaps'] = sorted(bundlecaps)
322 else:
323 else:
323 bundlecaps = () # kwargs could have it set to None
324 bundlecaps = () # kwargs could have it set to None
324 for key, value in kwargs.iteritems():
325 for key, value in kwargs.iteritems():
325 if value is None:
326 if value is None:
326 continue
327 continue
327 keytype = gboptsmap.get(key)
328 keytype = gboptsmap.get(key)
328 if keytype is None:
329 if keytype is None:
329 raise error.ProgrammingError(
330 raise error.ProgrammingError(
330 'Unexpectedly None keytype for key %s' % key)
331 'Unexpectedly None keytype for key %s' % key)
331 elif keytype == 'nodes':
332 elif keytype == 'nodes':
332 value = encodelist(value)
333 value = encodelist(value)
333 elif keytype in ('csv', 'scsv'):
334 elif keytype in ('csv', 'scsv'):
334 value = ','.join(value)
335 value = ','.join(value)
335 elif keytype == 'boolean':
336 elif keytype == 'boolean':
336 value = '%i' % bool(value)
337 value = '%i' % bool(value)
337 elif keytype != 'plain':
338 elif keytype != 'plain':
338 raise KeyError('unknown getbundle option type %s'
339 raise KeyError('unknown getbundle option type %s'
339 % keytype)
340 % keytype)
340 opts[key] = value
341 opts[key] = value
341 f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
342 f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
342 if any((cap.startswith('HG2') for cap in bundlecaps)):
343 if any((cap.startswith('HG2') for cap in bundlecaps)):
343 return bundle2.getunbundler(self.ui, f)
344 return bundle2.getunbundler(self.ui, f)
344 else:
345 else:
345 return changegroupmod.cg1unpacker(f, 'UN')
346 return changegroupmod.cg1unpacker(f, 'UN')
346
347
347 def unbundle(self, cg, heads, url):
348 def unbundle(self, cg, heads, url):
348 '''Send cg (a readable file-like object representing the
349 '''Send cg (a readable file-like object representing the
349 changegroup to push, typically a chunkbuffer object) to the
350 changegroup to push, typically a chunkbuffer object) to the
350 remote server as a bundle.
351 remote server as a bundle.
351
352
352 When pushing a bundle10 stream, return an integer indicating the
353 When pushing a bundle10 stream, return an integer indicating the
353 result of the push (see changegroup.apply()).
354 result of the push (see changegroup.apply()).
354
355
355 When pushing a bundle20 stream, return a bundle20 stream.
356 When pushing a bundle20 stream, return a bundle20 stream.
356
357
357 `url` is the url the client thinks it's pushing to, which is
358 `url` is the url the client thinks it's pushing to, which is
358 visible to hooks.
359 visible to hooks.
359 '''
360 '''
360
361
361 if heads != ['force'] and self.capable('unbundlehash'):
362 if heads != ['force'] and self.capable('unbundlehash'):
362 heads = encodelist(['hashed',
363 heads = encodelist(['hashed',
363 hashlib.sha1(''.join(sorted(heads))).digest()])
364 hashlib.sha1(''.join(sorted(heads))).digest()])
364 else:
365 else:
365 heads = encodelist(heads)
366 heads = encodelist(heads)
366
367
367 if util.safehasattr(cg, 'deltaheader'):
368 if util.safehasattr(cg, 'deltaheader'):
368 # this is a bundle10, do the old style call sequence
369 # this is a bundle10, do the old style call sequence
369 ret, output = self._callpush("unbundle", cg, heads=heads)
370 ret, output = self._callpush("unbundle", cg, heads=heads)
370 if ret == "":
371 if ret == "":
371 raise error.ResponseError(
372 raise error.ResponseError(
372 _('push failed:'), output)
373 _('push failed:'), output)
373 try:
374 try:
374 ret = int(ret)
375 ret = int(ret)
375 except ValueError:
376 except ValueError:
376 raise error.ResponseError(
377 raise error.ResponseError(
377 _('push failed (unexpected response):'), ret)
378 _('push failed (unexpected response):'), ret)
378
379
379 for l in output.splitlines(True):
380 for l in output.splitlines(True):
380 self.ui.status(_('remote: '), l)
381 self.ui.status(_('remote: '), l)
381 else:
382 else:
382 # bundle2 push. Send a stream, fetch a stream.
383 # bundle2 push. Send a stream, fetch a stream.
383 stream = self._calltwowaystream('unbundle', cg, heads=heads)
384 stream = self._calltwowaystream('unbundle', cg, heads=heads)
384 ret = bundle2.getunbundler(self.ui, stream)
385 ret = bundle2.getunbundler(self.ui, stream)
385 return ret
386 return ret
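For reference, the ``unbundlehash`` branch above condenses the client's view of the remote heads into a single digest; reproducing it standalone (``heads`` is a placeholder list of 20-byte binary nodes):

    heads = ['\x00' * 20]
    digest = hashlib.sha1(''.join(sorted(heads))).digest()
    wireheads = encodelist(['hashed', digest])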
386
387
387 # End of basewirepeer interface.
388 # End of basewirepeer interface.
388
389
389 # Begin of baselegacywirepeer interface.
390 # Begin of baselegacywirepeer interface.
390
391
391 def branches(self, nodes):
392 def branches(self, nodes):
392 n = encodelist(nodes)
393 n = encodelist(nodes)
393 d = self._call("branches", nodes=n)
394 d = self._call("branches", nodes=n)
394 try:
395 try:
395 br = [tuple(decodelist(b)) for b in d.splitlines()]
396 br = [tuple(decodelist(b)) for b in d.splitlines()]
396 return br
397 return br
397 except ValueError:
398 except ValueError:
398 self._abort(error.ResponseError(_("unexpected response:"), d))
399 self._abort(error.ResponseError(_("unexpected response:"), d))
399
400
400 def between(self, pairs):
401 def between(self, pairs):
401 batch = 8 # avoid giant requests
402 batch = 8 # avoid giant requests
402 r = []
403 r = []
403 for i in xrange(0, len(pairs), batch):
404 for i in xrange(0, len(pairs), batch):
404 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
405 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
405 d = self._call("between", pairs=n)
406 d = self._call("between", pairs=n)
406 try:
407 try:
407 r.extend(l and decodelist(l) or [] for l in d.splitlines())
408 r.extend(l and decodelist(l) or [] for l in d.splitlines())
408 except ValueError:
409 except ValueError:
409 self._abort(error.ResponseError(_("unexpected response:"), d))
410 self._abort(error.ResponseError(_("unexpected response:"), d))
410 return r
411 return r
411
412
412 def changegroup(self, nodes, kind):
413 def changegroup(self, nodes, kind):
413 n = encodelist(nodes)
414 n = encodelist(nodes)
414 f = self._callcompressable("changegroup", roots=n)
415 f = self._callcompressable("changegroup", roots=n)
415 return changegroupmod.cg1unpacker(f, 'UN')
416 return changegroupmod.cg1unpacker(f, 'UN')
416
417
417 def changegroupsubset(self, bases, heads, kind):
418 def changegroupsubset(self, bases, heads, kind):
418 self.requirecap('changegroupsubset', _('look up remote changes'))
419 self.requirecap('changegroupsubset', _('look up remote changes'))
419 bases = encodelist(bases)
420 bases = encodelist(bases)
420 heads = encodelist(heads)
421 heads = encodelist(heads)
421 f = self._callcompressable("changegroupsubset",
422 f = self._callcompressable("changegroupsubset",
422 bases=bases, heads=heads)
423 bases=bases, heads=heads)
423 return changegroupmod.cg1unpacker(f, 'UN')
424 return changegroupmod.cg1unpacker(f, 'UN')
424
425
425 # End of baselegacywirepeer interface.
426 # End of baselegacywirepeer interface.
426
427
427 def _submitbatch(self, req):
428 def _submitbatch(self, req):
428 """run batch request <req> on the server
429 """run batch request <req> on the server
429
430
430 Returns an iterator of the raw responses from the server.
431 Returns an iterator of the raw responses from the server.
431 """
432 """
432 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
433 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
433 chunk = rsp.read(1024)
434 chunk = rsp.read(1024)
434 work = [chunk]
435 work = [chunk]
435 while chunk:
436 while chunk:
436 while ';' not in chunk and chunk:
437 while ';' not in chunk and chunk:
437 chunk = rsp.read(1024)
438 chunk = rsp.read(1024)
438 work.append(chunk)
439 work.append(chunk)
439 merged = ''.join(work)
440 merged = ''.join(work)
440 while ';' in merged:
441 while ';' in merged:
441 one, merged = merged.split(';', 1)
442 one, merged = merged.split(';', 1)
442 yield unescapearg(one)
443 yield unescapearg(one)
443 chunk = rsp.read(1024)
444 chunk = rsp.read(1024)
444 work = [merged, chunk]
445 work = [merged, chunk]
445 yield unescapearg(''.join(work))
446 yield unescapearg(''.join(work))
446
447
447 def _submitone(self, op, args):
448 def _submitone(self, op, args):
448 return self._call(op, **pycompat.strkwargs(args))
449 return self._call(op, **pycompat.strkwargs(args))
449
450
450 def debugwireargs(self, one, two, three=None, four=None, five=None):
451 def debugwireargs(self, one, two, three=None, four=None, five=None):
451 # don't pass optional arguments left at their default value
452 # don't pass optional arguments left at their default value
452 opts = {}
453 opts = {}
453 if three is not None:
454 if three is not None:
454 opts['three'] = three
455 opts['three'] = three
455 if four is not None:
456 if four is not None:
456 opts['four'] = four
457 opts['four'] = four
457 return self._call('debugwireargs', one=one, two=two, **opts)
458 return self._call('debugwireargs', one=one, two=two, **opts)
458
459
459 def _call(self, cmd, **args):
460 def _call(self, cmd, **args):
460 """execute <cmd> on the server
461 """execute <cmd> on the server
461
462
462 The command is expected to return a simple string.
463 The command is expected to return a simple string.
463
464
464 returns the server reply as a string."""
465 returns the server reply as a string."""
465 raise NotImplementedError()
466 raise NotImplementedError()
466
467
467 def _callstream(self, cmd, **args):
468 def _callstream(self, cmd, **args):
468 """execute <cmd> on the server
469 """execute <cmd> on the server
469
470
470 The command is expected to return a stream. Note that if the
471 The command is expected to return a stream. Note that if the
471 command doesn't return a stream, _callstream behaves
472 command doesn't return a stream, _callstream behaves
472 differently for ssh and http peers.
473 differently for ssh and http peers.
473
474
474 returns the server reply as a file like object.
475 returns the server reply as a file like object.
475 """
476 """
476 raise NotImplementedError()
477 raise NotImplementedError()
477
478
478 def _callcompressable(self, cmd, **args):
479 def _callcompressable(self, cmd, **args):
479 """execute <cmd> on the server
480 """execute <cmd> on the server
480
481
481 The command is expected to return a stream.
482 The command is expected to return a stream.
482
483
483 The stream may have been compressed in some implementations. This
484 The stream may have been compressed in some implementations. This
484 function takes care of the decompression. This is the only difference
485 function takes care of the decompression. This is the only difference
485 with _callstream.
486 with _callstream.
486
487
487 returns the server reply as a file like object.
488 returns the server reply as a file like object.
488 """
489 """
489 raise NotImplementedError()
490 raise NotImplementedError()
490
491
491 def _callpush(self, cmd, fp, **args):
492 def _callpush(self, cmd, fp, **args):
492 """execute a <cmd> on server
493 """execute a <cmd> on server
493
494
494 The command is expected to be related to a push. Push has a special
495 The command is expected to be related to a push. Push has a special
495 return method.
496 return method.
496
497
497 returns the server reply as a (ret, output) tuple. ret is either
498 returns the server reply as a (ret, output) tuple. ret is either
498 empty (error) or a stringified int.
499 empty (error) or a stringified int.
499 """
500 """
500 raise NotImplementedError()
501 raise NotImplementedError()
501
502
502 def _calltwowaystream(self, cmd, fp, **args):
503 def _calltwowaystream(self, cmd, fp, **args):
503 """execute <cmd> on server
504 """execute <cmd> on server
504
505
505 The command will send a stream to the server and get a stream in reply.
506 The command will send a stream to the server and get a stream in reply.
506 """
507 """
507 raise NotImplementedError()
508 raise NotImplementedError()
508
509
509 def _abort(self, exception):
510 def _abort(self, exception):
510 """clearly abort the wire protocol connection and raise the exception
511 """clearly abort the wire protocol connection and raise the exception
511 """
512 """
512 raise NotImplementedError()
513 raise NotImplementedError()
513
514
514 # server side
515 # server side
515
516
516 # wire protocol command can either return a string or one of these classes.
517 # wire protocol command can either return a string or one of these classes.
517 class streamres(object):
518 class streamres(object):
518 """wireproto reply: binary stream
519 """wireproto reply: binary stream
519
520
520 The call was successful and the result is a stream.
521 The call was successful and the result is a stream.
521
522
522 Accepts either a generator or an object with a ``read(size)`` method.
523 Accepts either a generator or an object with a ``read(size)`` method.
523
524
524 ``v1compressible`` indicates whether this data can be compressed to
525 ``v1compressible`` indicates whether this data can be compressed to
525 "version 1" clients (technically: HTTP peers using
526 "version 1" clients (technically: HTTP peers using
526 application/mercurial-0.1 media type). This flag should NOT be used on
527 application/mercurial-0.1 media type). This flag should NOT be used on
527 new commands because new clients should support a more modern compression
528 new commands because new clients should support a more modern compression
528 mechanism.
529 mechanism.
529 """
530 """
530 def __init__(self, gen=None, reader=None, v1compressible=False):
531 def __init__(self, gen=None, reader=None, v1compressible=False):
531 self.gen = gen
532 self.gen = gen
532 self.reader = reader
533 self.reader = reader
533 self.v1compressible = v1compressible
534 self.v1compressible = v1compressible
534
535
535 class pushres(object):
536 class pushres(object):
536 """wireproto reply: success with simple integer return
537 """wireproto reply: success with simple integer return
537
538
538 The call was successful and returned an integer contained in `self.res`.
539 The call was successful and returned an integer contained in `self.res`.
539 """
540 """
540 def __init__(self, res):
541 def __init__(self, res):
541 self.res = res
542 self.res = res
542
543
543 class pusherr(object):
544 class pusherr(object):
544 """wireproto reply: failure
545 """wireproto reply: failure
545
546
546 The call failed. The `self.res` attribute contains the error message.
547 The call failed. The `self.res` attribute contains the error message.
547 """
548 """
548 def __init__(self, res):
549 def __init__(self, res):
549 self.res = res
550 self.res = res
550
551
551 class ooberror(object):
552 class ooberror(object):
552 """wireproto reply: failure of a batch of operation
553 """wireproto reply: failure of a batch of operation
553
554
554 Something failed during a batch call. The error message is stored in
555 Something failed during a batch call. The error message is stored in
555 `self.message`.
556 `self.message`.
556 """
557 """
557 def __init__(self, message):
558 def __init__(self, message):
558 self.message = message
559 self.message = message
559
560
560 def getdispatchrepo(repo, proto, command):
561 def getdispatchrepo(repo, proto, command):
561 """Obtain the repo used for processing wire protocol commands.
562 """Obtain the repo used for processing wire protocol commands.
562
563
563 The intent of this function is to serve as a monkeypatch point for
564 The intent of this function is to serve as a monkeypatch point for
564 extensions that need commands to operate on different repo views under
565 extensions that need commands to operate on different repo views under
565 specialized circumstances.
566 specialized circumstances.
566 """
567 """
567 return repo.filtered('served')
568 return repo.filtered('served')
568
569
569 def dispatch(repo, proto, command):
570 def dispatch(repo, proto, command):
570 repo = getdispatchrepo(repo, proto, command)
571 repo = getdispatchrepo(repo, proto, command)
571 func, spec = commands[command]
572 func, spec = commands[command]
572 args = proto.getargs(spec)
573 args = proto.getargs(spec)
573 return func(repo, proto, *args)
574 return func(repo, proto, *args)
574
575
575 def options(cmd, keys, others):
576 def options(cmd, keys, others):
576 opts = {}
577 opts = {}
577 for k in keys:
578 for k in keys:
578 if k in others:
579 if k in others:
579 opts[k] = others[k]
580 opts[k] = others[k]
580 del others[k]
581 del others[k]
581 if others:
582 if others:
582 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
583 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
583 % (cmd, ",".join(others)))
584 % (cmd, ",".join(others)))
584 return opts
585 return opts
585
586
586 def bundle1allowed(repo, action):
587 def bundle1allowed(repo, action):
587 """Whether a bundle1 operation is allowed from the server.
588 """Whether a bundle1 operation is allowed from the server.
588
589
589 Priority is:
590 Priority is:
590
591
591 1. server.bundle1gd.<action> (if generaldelta active)
592 1. server.bundle1gd.<action> (if generaldelta active)
592 2. server.bundle1.<action>
593 2. server.bundle1.<action>
593 3. server.bundle1gd (if generaldelta active)
594 3. server.bundle1gd (if generaldelta active)
594 4. server.bundle1
595 4. server.bundle1
595 """
596 """
596 ui = repo.ui
597 ui = repo.ui
597 gd = 'generaldelta' in repo.requirements
598 gd = 'generaldelta' in repo.requirements
598
599
599 if gd:
600 if gd:
600 v = ui.configbool('server', 'bundle1gd.%s' % action)
601 v = ui.configbool('server', 'bundle1gd.%s' % action)
601 if v is not None:
602 if v is not None:
602 return v
603 return v
603
604
604 v = ui.configbool('server', 'bundle1.%s' % action)
605 v = ui.configbool('server', 'bundle1.%s' % action)
605 if v is not None:
606 if v is not None:
606 return v
607 return v
607
608
608 if gd:
609 if gd:
609 v = ui.configbool('server', 'bundle1gd')
610 v = ui.configbool('server', 'bundle1gd')
610 if v is not None:
611 if v is not None:
611 return v
612 return v
612
613
613 return ui.configbool('server', 'bundle1')
614 return ui.configbool('server', 'bundle1')
614
615
615 def supportedcompengines(ui, proto, role):
616 def supportedcompengines(ui, proto, role):
616 """Obtain the list of supported compression engines for a request."""
617 """Obtain the list of supported compression engines for a request."""
617 assert role in (util.CLIENTROLE, util.SERVERROLE)
618 assert role in (util.CLIENTROLE, util.SERVERROLE)
618
619
619 compengines = util.compengines.supportedwireengines(role)
620 compengines = util.compengines.supportedwireengines(role)
620
621
621 # Allow config to override default list and ordering.
622 # Allow config to override default list and ordering.
622 if role == util.SERVERROLE:
623 if role == util.SERVERROLE:
623 configengines = ui.configlist('server', 'compressionengines')
624 configengines = ui.configlist('server', 'compressionengines')
624 config = 'server.compressionengines'
625 config = 'server.compressionengines'
625 else:
626 else:
626 # This is currently implemented mainly to facilitate testing. In most
627 # This is currently implemented mainly to facilitate testing. In most
627 # cases, the server should be in charge of choosing a compression engine
628 # cases, the server should be in charge of choosing a compression engine
628 # because a server has the most to lose from a sub-optimal choice. (e.g.
629 # because a server has the most to lose from a sub-optimal choice. (e.g.
629 # CPU DoS due to an expensive engine or a network DoS due to poor
630 # CPU DoS due to an expensive engine or a network DoS due to poor
630 # compression ratio).
631 # compression ratio).
631 configengines = ui.configlist('experimental',
632 configengines = ui.configlist('experimental',
632 'clientcompressionengines')
633 'clientcompressionengines')
633 config = 'experimental.clientcompressionengines'
634 config = 'experimental.clientcompressionengines'
634
635
635 # No explicit config. Filter out the ones that aren't supposed to be
636 # No explicit config. Filter out the ones that aren't supposed to be
636 # advertised and return default ordering.
637 # advertised and return default ordering.
637 if not configengines:
638 if not configengines:
638 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
639 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
639 return [e for e in compengines
640 return [e for e in compengines
640 if getattr(e.wireprotosupport(), attr) > 0]
641 if getattr(e.wireprotosupport(), attr) > 0]
641
642
642 # If compression engines are listed in the config, assume there is a good
643 # If compression engines are listed in the config, assume there is a good
643 # reason for it (like server operators wanting to achieve specific
644 # reason for it (like server operators wanting to achieve specific
644 # performance characteristics). So fail fast if the config references
645 # performance characteristics). So fail fast if the config references
645 # unusable compression engines.
646 # unusable compression engines.
646 validnames = set(e.name() for e in compengines)
647 validnames = set(e.name() for e in compengines)
647 invalidnames = set(e for e in configengines if e not in validnames)
648 invalidnames = set(e for e in configengines if e not in validnames)
648 if invalidnames:
649 if invalidnames:
649 raise error.Abort(_('invalid compression engine defined in %s: %s') %
650 raise error.Abort(_('invalid compression engine defined in %s: %s') %
650 (config, ', '.join(sorted(invalidnames))))
651 (config, ', '.join(sorted(invalidnames))))
651
652
652 compengines = [e for e in compengines if e.name() in configengines]
653 compengines = [e for e in compengines if e.name() in configengines]
653 compengines = sorted(compengines,
654 compengines = sorted(compengines,
654 key=lambda e: configengines.index(e.name()))
655 key=lambda e: configengines.index(e.name()))
655
656
656 if not compengines:
657 if not compengines:
657 raise error.Abort(_('%s config option does not specify any known '
658 raise error.Abort(_('%s config option does not specify any known '
658 'compression engines') % config,
659 'compression engines') % config,
659 hint=_('usable compression engines: %s') %
660 hint=_('usable compression engines: %s') %
660 ', '.join(sorted(validnames)))
661 ', '.join(sorted(validnames)))
661
662
662 return compengines
663 return compengines
663
664
664 # list of commands
665 # list of commands
665 commands = {}
666 commands = {}
666
667
667 def wireprotocommand(name, args=''):
668 def wireprotocommand(name, args=''):
668 """decorator for wire protocol command"""
669 """decorator for wire protocol command"""
669 def register(func):
670 def register(func):
670 commands[name] = (func, args)
671 commands[name] = (func, args)
671 return func
672 return func
672 return register
673 return register
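A hypothetical example of how an extension could register an additional command with this decorator; ``echokey`` and its ``key`` argument are invented for illustration and are not part of Mercurial:

    @wireprotocommand('echokey', 'key')
    def echokey(repo, proto, key):
        # echo the received argument back as a simple string reply
        return '%s\n' % key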
673
674
674 @wireprotocommand('batch', 'cmds *')
675 @wireprotocommand('batch', 'cmds *')
675 def batch(repo, proto, cmds, others):
676 def batch(repo, proto, cmds, others):
676 repo = repo.filtered("served")
677 repo = repo.filtered("served")
677 res = []
678 res = []
678 for pair in cmds.split(';'):
679 for pair in cmds.split(';'):
679 op, args = pair.split(' ', 1)
680 op, args = pair.split(' ', 1)
680 vals = {}
681 vals = {}
681 for a in args.split(','):
682 for a in args.split(','):
682 if a:
683 if a:
683 n, v = a.split('=')
684 n, v = a.split('=')
684 vals[unescapearg(n)] = unescapearg(v)
685 vals[unescapearg(n)] = unescapearg(v)
685 func, spec = commands[op]
686 func, spec = commands[op]
686 if spec:
687 if spec:
687 keys = spec.split()
688 keys = spec.split()
688 data = {}
689 data = {}
689 for k in keys:
690 for k in keys:
690 if k == '*':
691 if k == '*':
691 star = {}
692 star = {}
692 for key in vals.keys():
693 for key in vals.keys():
693 if key not in keys:
694 if key not in keys:
694 star[key] = vals[key]
695 star[key] = vals[key]
695 data['*'] = star
696 data['*'] = star
696 else:
697 else:
697 data[k] = vals[k]
698 data[k] = vals[k]
698 result = func(repo, proto, *[data[k] for k in keys])
699 result = func(repo, proto, *[data[k] for k in keys])
699 else:
700 else:
700 result = func(repo, proto)
701 result = func(repo, proto)
701 if isinstance(result, ooberror):
702 if isinstance(result, ooberror):
702 return result
703 return result
703 res.append(escapearg(result))
704 res.append(escapearg(result))
704 return ';'.join(res)
705 return ';'.join(res)
705
706
706 @wireprotocommand('between', 'pairs')
707 @wireprotocommand('between', 'pairs')
707 def between(repo, proto, pairs):
708 def between(repo, proto, pairs):
708 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
709 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
709 r = []
710 r = []
710 for b in repo.between(pairs):
711 for b in repo.between(pairs):
711 r.append(encodelist(b) + "\n")
712 r.append(encodelist(b) + "\n")
712 return "".join(r)
713 return "".join(r)
713
714
714 @wireprotocommand('branchmap')
715 @wireprotocommand('branchmap')
715 def branchmap(repo, proto):
716 def branchmap(repo, proto):
716 branchmap = repo.branchmap()
717 branchmap = repo.branchmap()
717 heads = []
718 heads = []
718 for branch, nodes in branchmap.iteritems():
719 for branch, nodes in branchmap.iteritems():
719 branchname = urlreq.quote(encoding.fromlocal(branch))
720 branchname = urlreq.quote(encoding.fromlocal(branch))
720 branchnodes = encodelist(nodes)
721 branchnodes = encodelist(nodes)
721 heads.append('%s %s' % (branchname, branchnodes))
722 heads.append('%s %s' % (branchname, branchnodes))
722 return '\n'.join(heads)
723 return '\n'.join(heads)
723
724
724 @wireprotocommand('branches', 'nodes')
725 @wireprotocommand('branches', 'nodes')
725 def branches(repo, proto, nodes):
726 def branches(repo, proto, nodes):
726 nodes = decodelist(nodes)
727 nodes = decodelist(nodes)
727 r = []
728 r = []
728 for b in repo.branches(nodes):
729 for b in repo.branches(nodes):
729 r.append(encodelist(b) + "\n")
730 r.append(encodelist(b) + "\n")
730 return "".join(r)
731 return "".join(r)
731
732
732 @wireprotocommand('clonebundles', '')
733 @wireprotocommand('clonebundles', '')
733 def clonebundles(repo, proto):
734 def clonebundles(repo, proto):
734 """Server command for returning info for available bundles to seed clones.
735 """Server command for returning info for available bundles to seed clones.
735
736
736 Clients will parse this response and determine what bundle to fetch.
737 Clients will parse this response and determine what bundle to fetch.
737
738
738 Extensions may wrap this command to filter or dynamically emit data
739 Extensions may wrap this command to filter or dynamically emit data
739 depending on the request. e.g. you could advertise URLs for the closest
740 depending on the request. e.g. you could advertise URLs for the closest
740 data center given the client's IP address.
741 data center given the client's IP address.
741 """
742 """
742 return repo.vfs.tryread('clonebundles.manifest')
743 return repo.vfs.tryread('clonebundles.manifest')
743
744
744 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
745 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
745 'known', 'getbundle', 'unbundlehash', 'batch']
746 'known', 'getbundle', 'unbundlehash', 'batch']
746
747
747 def _capabilities(repo, proto):
748 def _capabilities(repo, proto):
748 """return a list of capabilities for a repo
749 """return a list of capabilities for a repo
749
750
750 This function exists to allow extensions to easily wrap capabilities
751 This function exists to allow extensions to easily wrap capabilities
751 computation
752 computation
752
753
753 - returns a list: easy to alter
754 - returns a list: easy to alter
754 - changes done here will be propagated to both `capabilities` and `hello`
755 - changes done here will be propagated to both `capabilities` and `hello`
755 command without any other action needed.
756 command without any other action needed.
756 """
757 """
757 # copy to prevent modification of the global list
758 # copy to prevent modification of the global list
758 caps = list(wireprotocaps)
759 caps = list(wireprotocaps)
759 if streamclone.allowservergeneration(repo):
760 if streamclone.allowservergeneration(repo):
760 if repo.ui.configbool('server', 'preferuncompressed'):
761 if repo.ui.configbool('server', 'preferuncompressed'):
761 caps.append('stream-preferred')
762 caps.append('stream-preferred')
762 requiredformats = repo.requirements & repo.supportedformats
763 requiredformats = repo.requirements & repo.supportedformats
763 # if our local revlogs are just revlogv1, add 'stream' cap
764 # if our local revlogs are just revlogv1, add 'stream' cap
764 if not requiredformats - {'revlogv1'}:
765 if not requiredformats - {'revlogv1'}:
765 caps.append('stream')
766 caps.append('stream')
766 # otherwise, add 'streamreqs' detailing our local revlog format
767 # otherwise, add 'streamreqs' detailing our local revlog format
767 else:
768 else:
768 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
769 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
769 if repo.ui.configbool('experimental', 'bundle2-advertise'):
770 if repo.ui.configbool('experimental', 'bundle2-advertise'):
770 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
771 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
771 caps.append('bundle2=' + urlreq.quote(capsblob))
772 caps.append('bundle2=' + urlreq.quote(capsblob))
772 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
773 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
773
774
774 if proto.name == 'http':
775 if proto.name == 'http':
775 caps.append('httpheader=%d' %
776 caps.append('httpheader=%d' %
776 repo.ui.configint('server', 'maxhttpheaderlen'))
777 repo.ui.configint('server', 'maxhttpheaderlen'))
777 if repo.ui.configbool('experimental', 'httppostargs'):
778 if repo.ui.configbool('experimental', 'httppostargs'):
778 caps.append('httppostargs')
779 caps.append('httppostargs')
779
780
780 # FUTURE advertise 0.2rx once support is implemented
781 # FUTURE advertise 0.2rx once support is implemented
781 # FUTURE advertise minrx and mintx after consulting config option
782 # FUTURE advertise minrx and mintx after consulting config option
782 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
783 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
783
784
784 compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
785 compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
785 if compengines:
786 if compengines:
786 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
787 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
787 for e in compengines)
788 for e in compengines)
788 caps.append('compression=%s' % comptypes)
789 caps.append('compression=%s' % comptypes)
789
790
790 return caps
791 return caps
791
792
792 # If you are writing an extension and consider wrapping this function, wrap
793 # If you are writing an extension and consider wrapping this function, wrap
793 # `_capabilities` instead.
794 # `_capabilities` instead.
794 @wireprotocommand('capabilities')
795 @wireprotocommand('capabilities')
795 def capabilities(repo, proto):
796 def capabilities(repo, proto):
796 return ' '.join(_capabilities(repo, proto))
797 return ' '.join(_capabilities(repo, proto))
797
798
798 @wireprotocommand('changegroup', 'roots')
799 @wireprotocommand('changegroup', 'roots')
799 def changegroup(repo, proto, roots):
800 def changegroup(repo, proto, roots):
800 nodes = decodelist(roots)
801 nodes = decodelist(roots)
801 outgoing = discovery.outgoing(repo, missingroots=nodes,
802 outgoing = discovery.outgoing(repo, missingroots=nodes,
802 missingheads=repo.heads())
803 missingheads=repo.heads())
803 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
804 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
804 return streamres(reader=cg, v1compressible=True)
805 return streamres(reader=cg, v1compressible=True)
805
806
806 @wireprotocommand('changegroupsubset', 'bases heads')
807 @wireprotocommand('changegroupsubset', 'bases heads')
807 def changegroupsubset(repo, proto, bases, heads):
808 def changegroupsubset(repo, proto, bases, heads):
808 bases = decodelist(bases)
809 bases = decodelist(bases)
809 heads = decodelist(heads)
810 heads = decodelist(heads)
810 outgoing = discovery.outgoing(repo, missingroots=bases,
811 outgoing = discovery.outgoing(repo, missingroots=bases,
811 missingheads=heads)
812 missingheads=heads)
812 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
813 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
813 return streamres(reader=cg, v1compressible=True)
814 return streamres(reader=cg, v1compressible=True)
814
815
815 @wireprotocommand('debugwireargs', 'one two *')
816 @wireprotocommand('debugwireargs', 'one two *')
816 def debugwireargs(repo, proto, one, two, others):
817 def debugwireargs(repo, proto, one, two, others):
817 # only accept optional args from the known set
818 # only accept optional args from the known set
818 opts = options('debugwireargs', ['three', 'four'], others)
819 opts = options('debugwireargs', ['three', 'four'], others)
819 return repo.debugwireargs(one, two, **opts)
820 return repo.debugwireargs(one, two, **opts)
820
821
821 @wireprotocommand('getbundle', '*')
822 @wireprotocommand('getbundle', '*')
822 def getbundle(repo, proto, others):
823 def getbundle(repo, proto, others):
823 opts = options('getbundle', gboptsmap.keys(), others)
824 opts = options('getbundle', gboptsmap.keys(), others)
824 for k, v in opts.iteritems():
825 for k, v in opts.iteritems():
825 keytype = gboptsmap[k]
826 keytype = gboptsmap[k]
826 if keytype == 'nodes':
827 if keytype == 'nodes':
827 opts[k] = decodelist(v)
828 opts[k] = decodelist(v)
828 elif keytype == 'csv':
829 elif keytype == 'csv':
829 opts[k] = list(v.split(','))
830 opts[k] = list(v.split(','))
830 elif keytype == 'scsv':
831 elif keytype == 'scsv':
831 opts[k] = set(v.split(','))
832 opts[k] = set(v.split(','))
832 elif keytype == 'boolean':
833 elif keytype == 'boolean':
833 # Client should serialize False as '0', which is a non-empty string
834 # Client should serialize False as '0', which is a non-empty string
834 # so it evaluates as a True bool.
835 # so it evaluates as a True bool.
835 if v == '0':
836 if v == '0':
836 opts[k] = False
837 opts[k] = False
837 else:
838 else:
838 opts[k] = bool(v)
839 opts[k] = bool(v)
839 elif keytype != 'plain':
840 elif keytype != 'plain':
840 raise KeyError('unknown getbundle option type %s'
841 raise KeyError('unknown getbundle option type %s'
841 % keytype)
842 % keytype)
842
843
843 if not bundle1allowed(repo, 'pull'):
844 if not bundle1allowed(repo, 'pull'):
844 if not exchange.bundle2requested(opts.get('bundlecaps')):
845 if not exchange.bundle2requested(opts.get('bundlecaps')):
845 if proto.name == 'http':
846 if proto.name == 'http':
846 return ooberror(bundle2required)
847 return ooberror(bundle2required)
847 raise error.Abort(bundle2requiredmain,
848 raise error.Abort(bundle2requiredmain,
848 hint=bundle2requiredhint)
849 hint=bundle2requiredhint)
849
850
850 try:
851 try:
851 if repo.ui.configbool('server', 'disablefullbundle'):
852 if repo.ui.configbool('server', 'disablefullbundle'):
852 # Check to see if this is a full clone.
853 # Check to see if this is a full clone.
853 clheads = set(repo.changelog.heads())
854 clheads = set(repo.changelog.heads())
854 heads = set(opts.get('heads', set()))
855 heads = set(opts.get('heads', set()))
855 common = set(opts.get('common', set()))
856 common = set(opts.get('common', set()))
856 common.discard(nullid)
857 common.discard(nullid)
857 if not common and clheads == heads:
858 if not common and clheads == heads:
858 raise error.Abort(
859 raise error.Abort(
859 _('server has pull-based clones disabled'),
860 _('server has pull-based clones disabled'),
860 hint=_('remove --pull if specified or upgrade Mercurial'))
861 hint=_('remove --pull if specified or upgrade Mercurial'))
861
862
862 chunks = exchange.getbundlechunks(repo, 'serve',
863 chunks = exchange.getbundlechunks(repo, 'serve',
863 **pycompat.strkwargs(opts))
864 **pycompat.strkwargs(opts))
864 except error.Abort as exc:
865 except error.Abort as exc:
865 # cleanly forward Abort error to the client
866 # cleanly forward Abort error to the client
866 if not exchange.bundle2requested(opts.get('bundlecaps')):
867 if not exchange.bundle2requested(opts.get('bundlecaps')):
867 if proto.name == 'http':
868 if proto.name == 'http':
868 return ooberror(str(exc) + '\n')
869 return ooberror(str(exc) + '\n')
869 raise # cannot do better for bundle1 + ssh
870 raise # cannot do better for bundle1 + ssh
870 # a bundle2 request expects a bundle2 reply
871 # a bundle2 request expects a bundle2 reply
871 bundler = bundle2.bundle20(repo.ui)
872 bundler = bundle2.bundle20(repo.ui)
872 manargs = [('message', str(exc))]
873 manargs = [('message', str(exc))]
873 advargs = []
874 advargs = []
874 if exc.hint is not None:
875 if exc.hint is not None:
875 advargs.append(('hint', exc.hint))
876 advargs.append(('hint', exc.hint))
876 bundler.addpart(bundle2.bundlepart('error:abort',
877 bundler.addpart(bundle2.bundlepart('error:abort',
877 manargs, advargs))
878 manargs, advargs))
878 return streamres(gen=bundler.getchunks(), v1compressible=True)
879 return streamres(gen=bundler.getchunks(), v1compressible=True)
879 return streamres(gen=chunks, v1compressible=True)
880 return streamres(gen=chunks, v1compressible=True)
880
881
881 @wireprotocommand('heads')
882 @wireprotocommand('heads')
882 def heads(repo, proto):
883 def heads(repo, proto):
883 h = repo.heads()
884 h = repo.heads()
884 return encodelist(h) + "\n"
885 return encodelist(h) + "\n"
885
886
886 @wireprotocommand('hello')
887 @wireprotocommand('hello')
887 def hello(repo, proto):
888 def hello(repo, proto):
888 '''the hello command returns a set of lines describing various
889 '''the hello command returns a set of lines describing various
889 interesting things about the server, in an RFC822-like format.
890 interesting things about the server, in an RFC822-like format.
890 Currently the only one defined is "capabilities", which
891 Currently the only one defined is "capabilities", which
891 consists of a line in the form:
892 consists of a line in the form:
892
893
893 capabilities: space separated list of tokens
894 capabilities: space separated list of tokens
894 '''
895 '''
895 return "capabilities: %s\n" % (capabilities(repo, proto))
896 return "capabilities: %s\n" % (capabilities(repo, proto))
896
897
897 @wireprotocommand('listkeys', 'namespace')
898 @wireprotocommand('listkeys', 'namespace')
898 def listkeys(repo, proto, namespace):
899 def listkeys(repo, proto, namespace):
899 d = repo.listkeys(encoding.tolocal(namespace)).items()
900 d = repo.listkeys(encoding.tolocal(namespace)).items()
900 return pushkeymod.encodekeys(d)
901 return pushkeymod.encodekeys(d)
901
902
902 @wireprotocommand('lookup', 'key')
903 @wireprotocommand('lookup', 'key')
903 def lookup(repo, proto, key):
904 def lookup(repo, proto, key):
904 try:
905 try:
905 k = encoding.tolocal(key)
906 k = encoding.tolocal(key)
906 c = repo[k]
907 c = repo[k]
907 r = c.hex()
908 r = c.hex()
908 success = 1
909 success = 1
909 except Exception as inst:
910 except Exception as inst:
910 r = str(inst)
911 r = str(inst)
911 success = 0
912 success = 0
912 return "%d %s\n" % (success, r)
913 return "%d %s\n" % (success, r)
913
914
914 @wireprotocommand('known', 'nodes *')
915 @wireprotocommand('known', 'nodes *')
915 def known(repo, proto, nodes, others):
916 def known(repo, proto, nodes, others):
916 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
917 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
917
918
@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and util.escapestr(new) != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    if util.safehasattr(proto, 'restore'):

        proto.redirect()

        try:
            r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                             encoding.tolocal(old), new) or False
        except error.Abort:
            r = False

        output = proto.restore()

        return '%s\n%s' % (int(r), output)

    r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                     encoding.tolocal(old), new)
    return '%s\n' % int(r)

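When the protocol can capture server output (the 'restore' branch above) the reply is the integer result on the first line followed by the captured output; otherwise it is just the integer and a newline. A hypothetical client-side split that copes with both forms:

# Split a pushkey reply into (succeeded, server_output).
def parse_pushkey(payload):
    first, _, rest = payload.decode('utf-8').partition('\n')
    return first.strip() == '1', rest

print(parse_pushkey(b'1\n'))
print(parse_pushkey(b'0\nremote: pushkey hook failed\n'))
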
@wireprotocommand('stream_out')
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. Client checks to see if it understands the format.
    '''
    if not streamclone.allowservergeneration(repo):
        return '1\n'

    def getstream(it):
        yield '0\n'
        for chunk in it:
            yield chunk

    try:
        # LockError may be raised before the first result is yielded. Don't
        # emit output until we're sure we got the lock successfully.
        it = streamclone.generatev1wireproto(repo)
        return streamres(gen=getstream(it))
    except error.LockError:
        return '2\n'

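The first line of the reply acts as a status code: '1' means streaming clone is not allowed, '2' means the repository lock could not be taken, and '0' is followed by the raw stream data. A toy dispatcher over that convention (the function name is made up):

# Map the leading status line of a stream_out reply to an outcome.
def classify_stream_reply(first_line):
    return {
        '1': 'streaming not allowed by the server',
        '2': 'server could not lock the repository',
        '0': 'stream data follows',
    }.get(first_line.strip(), 'unknown status')

for probe in ('1', '2', '0'):
    print(probe, '->', classify_stream_reply(probe))
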
@wireprotocommand('unbundle', 'heads')
def unbundle(repo, proto, heads):
    their_heads = decodelist(heads)

    try:
        proto.redirect()

        exchange.check_heads(repo, their_heads, 'preparing changes')

        # write bundle data to temporary file because it can be big
        fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
        fp = os.fdopen(fd, pycompat.sysstr('wb+'))
        r = 0
        try:
            proto.getfile(fp)
            fp.seek(0)
            gen = exchange.readbundle(repo.ui, fp, None)
            if (isinstance(gen, changegroupmod.cg1unpacker)
                and not bundle1allowed(repo, 'push')):
                if proto.name == 'http':
                    # need to special-case http because stderr does not reach
                    # the http client on a failed push, so we abuse another
                    # error type to make sure the message gets to the user.
                    return ooberror(bundle2required)
                raise error.Abort(bundle2requiredmain,
                                  hint=bundle2requiredhint)

            r = exchange.unbundle(repo, gen, their_heads, 'serve',
                                  proto._client())
            if util.safehasattr(r, 'addpart'):
                # The return looks streamable, we are in the bundle2 case and
                # should return a stream.
                return streamres(gen=r.getchunks())
            return pushres(r)

        finally:
            fp.close()
            os.unlink(tempname)

    except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
        # handle non-bundle2 case first
        if not getattr(exc, 'duringunbundle2', False):
            try:
                raise
            except error.Abort:
                # The old code we moved used util.stderr directly; we kept
                # that to minimise the change. This needs to be moved to
                # something proper. Feel free to do it.
                util.stderr.write("abort: %s\n" % exc)
                if exc.hint is not None:
                    util.stderr.write("(%s)\n" % exc.hint)
                return pushres(0)
            except error.PushRaced:
                return pusherr(str(exc))

        bundler = bundle2.bundle20(repo.ui)
        for out in getattr(exc, '_bundle2salvagedoutput', ()):
            bundler.addpart(out)
        try:
            try:
                raise
            except error.PushkeyFailed as exc:
                # check client caps
                remotecaps = getattr(exc, '_replycaps', None)
                if (remotecaps is not None
                    and 'pushkey' not in remotecaps.get('error', ())):
                    # no support on the remote side, fall back to Abort handler.
                    raise
                part = bundler.newpart('error:pushkey')
                part.addparam('in-reply-to', exc.partid)
                if exc.namespace is not None:
                    part.addparam('namespace', exc.namespace, mandatory=False)
                if exc.key is not None:
                    part.addparam('key', exc.key, mandatory=False)
                if exc.new is not None:
                    part.addparam('new', exc.new, mandatory=False)
                if exc.old is not None:
                    part.addparam('old', exc.old, mandatory=False)
                if exc.ret is not None:
                    part.addparam('ret', exc.ret, mandatory=False)
        except error.BundleValueError as exc:
            errpart = bundler.newpart('error:unsupportedcontent')
            if exc.parttype is not None:
                errpart.addparam('parttype', exc.parttype)
            if exc.params:
                errpart.addparam('params', '\0'.join(exc.params))
        except error.Abort as exc:
            manargs = [('message', str(exc))]
            advargs = []
            if exc.hint is not None:
                advargs.append(('hint', exc.hint))
            bundler.addpart(bundle2.bundlepart('error:abort',
                                               manargs, advargs))
        except error.PushRaced as exc:
            bundler.newpart('error:pushraced', [('message', str(exc))])
        return streamres(gen=bundler.getchunks())
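
From the caller's point of view the push result is either a bare integer (the pushres path, bundle1) or a bundle2 reply stream that may carry the error:* parts built above. A rough sketch of branching on those two shapes, using deliberately simplified stand-ins for the real return objects:

# Hypothetical stand-ins: an int mimics pushres, an iterable of chunks
# mimics the streamres built from bundler.getchunks().
def handle_unbundle_result(result):
    if isinstance(result, int):
        return 'bundle1 push result: %d' % result
    return 'bundle2 reply with %d chunks' % sum(1 for _ in result)

print(handle_unbundle_result(1))
print(handle_unbundle_result(iter([b'HG20', b'...part data...'])))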