exchange: don't send stream data when server.uncompressed is set...
Gregory Szorc
r35809:9adae6a2 default
@@ -1,2247 +1,2254 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', # legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params

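# Illustrative only (not part of this change): example bundlespec strings as
# parsebundlespec() above would interpret them, assuming the default
# compression engines are available. With externalnames=False the
# human-centric names are mapped to internal ones via _bundlespeccgversions
# and the engine's bundletype():
#
#   parsebundlespec(repo, 'gzip-v2')
#       -> ('GZ', '02', {})
#   parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#       -> ('UN', 's1', {'requirements': 'revlogv1'})
#   parsebundlespec(repo, 'v2', strict=False)  # compression defaults to bzip2
#       -> ('BZ', '02', {})
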
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

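# Illustrative only: the 4-byte headers readbundle() dispatches on. 'HG10'
# is a changegroup-1 bundle (followed by a 2-byte compression code such as
# 'UN', 'GZ' or 'BZ'), 'HG2x' is a bundle2 container, and 'HGS1' is a
# packed/stream-clone bundle:
#
#   with open('bundle.hg', 'rb') as fh:
#       unbundler = readbundle(ui, fh, 'bundle.hg')
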
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s')
                                  % b.params['Compression'])
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

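# Illustrative only: getbundlespec() is the inverse direction of
# parsebundlespec(); for a stream-clone bundle it reproduces a spec such as
#
#   'none-packed1;requirements%3Drevlogv1%2Cgeneraldelta'
#
# where the requirements parameter is URI-encoded by urlreq.quote().
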
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    forcebundle1 = False
    # The goal of this config option is to allow developers to choose the
    # bundle version used during exchange. This is especially handy during
    # tests. The value is a list of bundle versions to pick from; the
    # highest supported version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

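# Illustrative only: forcing the legacy exchange path from a test hgrc,
# which makes _forcebundle1() above return True (assuming the remote also
# speaks bundle1):
#
#   [devel]
#   legacy.exchange = bundle1
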
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }

def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

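# Illustrative only: a minimal caller sketch, assuming `repo` is a
# localrepository and `peer` was obtained through hg.peer(); push everything
# plus one bookmark and inspect the changegroup result described above:
#
#   pushop = push(repo, peer, bookmarks=('my-bookmark',))
#   if pushop.cgresult == 0:
#       raise RuntimeError('push failed with an HTTP error')
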
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

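# Illustrative only: how an extension might register an extra discovery step
# with the decorator above (the step name and function body are made up):
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.ui.debug('discovering mydata\n')
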
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public locally.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds the changesets we are going to push as draft
        #
        # should not be necessary for a publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # a very naive computation that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

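# Illustrative only: bookmod.comparebookmarks() returns the 8 categories
# unpacked above. For example, a local bookmark X pointing at a newer
# revision than the remote's X lands in advsrc and is queued on
# pushop.outbookmarks as (name, old-remote-node-hex, new-local-node-hex).
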
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are named here for the 80-char limit's sake
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are pushing and there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

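# Illustrative only: an extension could add its own part generator, using
# idx to control where it runs relative to the built-in steps (the names
# below are made up):
#
#   @b2partsgenerator('my-part', idx=0)  # run before every other generator
#   def _pushb2mypart(pushop, bundler):
#       bundler.newpart('my-extension-part', data='payload')
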
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

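# Illustrative only (behaviour summarized, not defined here): 'check:heads'
# asks the server to verify that its heads are exactly the nodes sent, while
# 'check:updated-heads' only guards the heads this push is about to rewrite,
# so unrelated heads may move server-side without aborting the push.
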
def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
824
824
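# Note on the protocol above: a parts generator may return a callable.
# _pushbundle2() below collects these "reply handlers" and, once the
# server's reply bundle has been processed, invokes each one with the
# resulting bundleoperation, so a generator can harvest the records for
# the parts it added (this is how pushop.cgresult gets filled in).
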
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

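# A sketch of the negotiation above, for a bundle2-capable server:
#
#   'heads' in the server's phases capability, client not in legacy mode
#       -> one binary 'phase-heads' part (_pushb2phaseheads)
#   otherwise, server handles 'pushkey' parts
#       -> one 'pushkey' part per outdated remote head
#   neither
#       -> no part is added here and 'phases' stays out of stepsdone, so
#          _pushsyncphase() later falls back to the standalone pushkey
#          protocol command.
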
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

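# ``updates[0]`` above is the bucket for phases.public (phase numbers are
# small integers, public being 0), so the binary part effectively asks the
# server to turn the listed heads public.
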
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

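# ``pushop.pkfailcb`` maps part ids to failure callbacks; _pushbundle2()
# consults it when the server's reply carries a PushkeyFailed error, so
# the abort message can name the exact node (or bookmark) whose update
# failed.
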
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # the part is mandatory: if we got a reply at all, the server
        # accepted every bookmark update, so report each action as done
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

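# Unlike the pushkey fallback below, the binary 'bookmarks' part carries
# every (bookmark, node) pair in a single mandatory part, so the server
# processes the updates as one unit instead of one pushkey exchange per
# bookmark.
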
def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

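# Example (a sketch): ``hg push --pushvars DEBUG=1`` lands here and adds a
# 'pushvars' part with advisory parameters; server-side hooks can then
# read them (exposed, at the time of writing, as HG_USERVAR_* environment
# variables).
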
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
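    # Give every registered parts generator (changeset, phase, bookmarks,
    # obsmarkers, pushvars, ...) a chance to add parts to the bundle. A
    # callable return value is a reply handler, run after the server's
    # reply has been processed.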
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

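# This is the legacy (bundle1) push path, used only when bundle2 is
# unavailable or disabled: the raw changegroup goes through the 'unbundle'
# wire command, with the known remote heads (or 'force') letting the
# server detect a concurrent commit/push race.
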
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation that is normally done as a
        # courtesy; it could publish changesets that are possibly still
        # draft on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop

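# Typical caller pattern (a sketch; ``hg pull`` in commands.py does
# roughly this):
#
#   other = hg.peer(repo, opts, source)
#   pullop = exchange.pull(repo, other, heads=revs,
#                          force=opts.get('force'),
#                          bookmarks=opts.get('bookmark', ()))
#   modheads = pullop.cgresult
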
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it would end up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

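# A rough reading of the triple stored above (per the
# findcommonincoming() contract in discovery.py): ``common`` holds (at
# least) the heads of the common subset, ``fetch`` the roots of the
# incoming set, and ``rheads`` the remote heads; an empty ``fetch`` means
# there is nothing to pull.
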
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # Check whether this pull can be served as a bundle2 stream clone (an
    # experimental feature at this point).
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

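# For a plain pull from a modern server, the getbundle() call above ends
# up looking roughly like this (a sketch):
#
#   remote.getbundle('pull', bundlecaps=..., common=pullop.common,
#                    heads=pullop.rheads, cg=True, phases=True,
#                    bookmarks=True)
#
# The reply bundle is then processed part by part, and changegroup
# results, phase updates and bookmark records are all harvested from the
# same bundleoperation.
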
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break a future useful rollback
    # call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

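# Capability ladder for the legacy pull above: prefer 'getbundle', fall
# back to 'changegroupsubset' for partial pulls, and accept the ancient
# 'changegroup' command only when pulling everything.
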
1602 def _pullphase(pullop):
1602 def _pullphase(pullop):
1603 # Get remote phases data from remote
1603 # Get remote phases data from remote
1604 if 'phases' in pullop.stepsdone:
1604 if 'phases' in pullop.stepsdone:
1605 return
1605 return
1606 remotephases = pullop.remote.listkeys('phases')
1606 remotephases = pullop.remote.listkeys('phases')
1607 _pullapplyphases(pullop, remotephases)
1607 _pullapplyphases(pullop, remotephases)
1608
1608
1609 def _pullapplyphases(pullop, remotephases):
1609 def _pullapplyphases(pullop, remotephases):
1610 """apply phase movement from observed remote state"""
1610 """apply phase movement from observed remote state"""
1611 if 'phases' in pullop.stepsdone:
1611 if 'phases' in pullop.stepsdone:
1612 return
1612 return
1613 pullop.stepsdone.add('phases')
1613 pullop.stepsdone.add('phases')
1614 publishing = bool(remotephases.get('publishing', False))
1614 publishing = bool(remotephases.get('publishing', False))
1615 if remotephases and not publishing:
1615 if remotephases and not publishing:
1616 # remote is new and non-publishing
1616 # remote is new and non-publishing
1617 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1617 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1618 pullop.pulledsubset,
1618 pullop.pulledsubset,
1619 remotephases)
1619 remotephases)
1620 dheads = pullop.pulledsubset
1620 dheads = pullop.pulledsubset
1621 else:
1621 else:
1622 # Remote is old or publishing all common changesets
1622 # Remote is old or publishing all common changesets
1623 # should be seen as public
1623 # should be seen as public
1624 pheads = pullop.pulledsubset
1624 pheads = pullop.pulledsubset
1625 dheads = []
1625 dheads = []
1626 unfi = pullop.repo.unfiltered()
1626 unfi = pullop.repo.unfiltered()
1627 phase = unfi._phasecache.phase
1627 phase = unfi._phasecache.phase
1628 rev = unfi.changelog.nodemap.get
1628 rev = unfi.changelog.nodemap.get
1629 public = phases.public
1629 public = phases.public
1630 draft = phases.draft
1630 draft = phases.draft
1631
1631
1632 # exclude changesets already public locally and update the others
1632 # exclude changesets already public locally and update the others
1633 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1633 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1634 if pheads:
1634 if pheads:
1635 tr = pullop.gettransaction()
1635 tr = pullop.gettransaction()
1636 phases.advanceboundary(pullop.repo, tr, public, pheads)
1636 phases.advanceboundary(pullop.repo, tr, public, pheads)
1637
1637
1638 # exclude changesets already draft locally and update the others
1638 # exclude changesets already draft locally and update the others
1639 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1639 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1640 if dheads:
1640 if dheads:
1641 tr = pullop.gettransaction()
1641 tr = pullop.gettransaction()
1642 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1642 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1643
1643
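The two filters above only advance changesets whose local phase is still
behind the target phase, so repeated pulls are idempotent. A minimal
standalone sketch of that exclusion rule, with invented node names and plain
integers standing in for Mercurial's phase constants:

# Illustrative only: fake nodes/phases mirroring the filter above.
public, draft = 0, 1
localphase = {b'n1': public, b'n2': draft}
pheads = [n for n in (b'n1', b'n2') if localphase[n] > public]
# -> [b'n2']: n1 is already public locally, so only n2's boundary moves.
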
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `pullop.gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

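caps20to10() folds the whole bundle2 capability blob into a single
`bundle2=` token by percent-quoting it. A rough standalone illustration of
that encoding; the blob below is a stand-in, and Python 3's urllib is used
in place of Mercurial's urlreq wrapper:

from urllib.parse import quote

capsblob = 'HG20\nchangegroup=01,02'      # stand-in capability blob
cap = 'bundle2=' + quote(capsblob, safe='')
# -> 'bundle2=HG20%0Achangegroup%3D01%2C02'
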
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; to wrap an existing step
    from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

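Extensions use this decorator to plug additional parts into getbundle. A
minimal hypothetical registration, mirroring the built-in generators below;
the step name 'myextradata' and its payload are invented for illustration:

@getbundle2partsgenerator('myextradata')
def _getbundlemyextrapart(bundler, repo, source, bundlecaps=None,
                          b2caps=None, **kwargs):
    """emit a custom part only when the client asked for it"""
    if not kwargs.get(r'myextradata', False):
        return
    bundler.newpart('myextradata', data=b'payload')
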
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

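A hedged sketch of how a caller might consume the returned (info, chunks)
pair, assuming a repo object obtained elsewhere; the destination path and
the 'bundle' source tag are arbitrary:

info, chunks = getbundlechunks(repo, 'bundle', bundlecaps={'HG20'})
with open('dump.hg', 'wb') as fh:       # arbitrary destination file
    for chunk in chunks:                # raw chunks of varying sizes
        fh.write(chunk)
# info['bundleversion'] is 2 here because 'HG20' selected the bundle2 path.
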
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, source, bundlecaps=None,
                      b2caps=None, heads=None, common=None, **kwargs):
    if not kwargs.get('stream', False):
        return

+    if not streamclone.allowservergeneration(repo):
+        raise error.Abort(_('stream data requested but server does not allow '
+                            'this feature'),
+                          hint=_('well-behaved clients should not be '
+                                 'requesting stream data from servers not '
+                                 'advertising it; the client may be buggy'))
+
    # Stream clones don't compress well. And compression undermines a
    # goal of stream clones, which is to be fast. Communicate the desire
    # to avoid compression to consumers of the bundle.
    bundler.prefercompressed = False

    filecount, bytecount, it = streamclone.generatev2(repo)
    requirements = ' '.join(sorted(repo.requirements))
    part = bundler.newpart('stream2', data=it)
    part.addparam('bytecount', '%d' % bytecount, mandatory=True)
    part.addparam('filecount', '%d' % filecount, mandatory=True)
    part.addparam('requirements', requirements, mandatory=True)

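The lines marked '+' above are the heart of this commit: even when a client
sends stream=1, the server now refuses to emit stream data if stream serving
is disabled. A conceptual sketch of the policy gate, assuming (as the tests
below suggest) that streamclone.allowservergeneration consults the
server.uncompressed config; treat this as an approximation, not the real
implementation:

def allowservergeneration_sketch(repo):
    # server.uncompressed = false  ->  never generate stream data
    return repo.ui.configbool('server', 'uncompressed', True)
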
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

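The negotiation above picks the highest changegroup version both ends
understand. A small self-contained illustration with invented values:

advertised = ['01', '02']                  # from the client's b2caps
supported = {'01', '02'}                   # what this repo can emit
common = [v for v in advertised if v in supported]
version = max(common) if common else '01'  # -> '02'
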
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

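The race check compares against a sha1 fingerprint of the server's sorted
binary heads. A standalone sketch of computing the fingerprint a client
would send, using fake 20-byte node ids (bytes literals for Python 3):

import hashlib

heads = [b'\x11' * 20, b'\x22' * 20]          # fake binary node ids
heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
their_heads = ['hashed', heads_hash]          # form accepted by check_heads()
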
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

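The three-element lockandtr list is a Python 2 idiom: a mutable cell that
lets the nested gettransaction() create the wlock, lock, and transaction
lazily on first use, while the outer finally releases whatever was taken. A
hypothetical Python 3 equivalent of the cell trick using nonlocal, shown
only to clarify the pattern:

def makegettransaction(repo, source):
    tr = None
    def gettransaction():
        nonlocal tr                  # py3 replaces the list-cell trick
        if tr is None:
            tr = repo.transaction(source)
        return tr
    return gettransaction
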
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

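An illustrative manifest and a rough sketch of what
parseclonebundlesmanifest() returns for it; the URLs are placeholders and
the derived COMPRESSION/VERSION values assume the BUNDLESPEC parses cleanly:

manifest = (b'https://example.com/full.hg BUNDLESPEC=gzip-v2\n'
            b'https://example.com/stream.hg BUNDLESPEC=none-packed1\n')
entries = parseclonebundlesmanifest(repo, manifest)
# roughly:
# [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#   'COMPRESSION': 'gzip', 'VERSION': 'v2'},
#  {'URL': 'https://example.com/stream.hg', 'BUNDLESPEC': 'none-packed1',
#   'COMPRESSION': 'none', 'VERSION': 'packed1'}]
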
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

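For example, when a stream clone was requested, only entries whose spec
parses to uncompressed packed1 ('UN' + 's1' internally) survive. A hedged
sketch with placeholder URLs, assuming an in-process repo object:

entries = [{'URL': 'https://example.com/a.hg', 'BUNDLESPEC': 'gzip-v2'},
           {'URL': 'https://example.com/b.hg', 'BUNDLESPEC': 'none-packed1'}]
kept = filterclonebundleentries(repo, entries, streamclonerequested=True)
# -> only the 'none-packed1' entry remains.
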
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

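With ui.clonebundleprefers set, entries matching the first preference sort
first, ties falling through to later preferences and finally to manifest
order. A small example with invented entries:

# e.g. ui.clonebundleprefers = COMPRESSION=zstd, VERSION=v2
prefers = [['COMPRESSION', 'zstd'], ['VERSION', 'v2']]
entries = [{'URL': 'a', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
           {'URL': 'b', 'COMPRESSION': 'zstd', 'VERSION': 'v2'}]
items = sorted(clonebundleentry(v, prefers) for v in entries)
# -> 'b' sorts before 'a' because its COMPRESSION matches the first prefer.
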
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False
@@ -1,506 +1,506 @@
#require serve

#testcases stream-legacy stream-bundle2

#if stream-bundle2
  $ cat << EOF >> $HGRCPATH
  > [experimental]
  > bundle2.stream = yes
  > EOF
#endif

Initialize repository
the status call is to check for issue5130

  $ hg init server
  $ cd server
  $ touch foo
  $ hg -q commit -A -m initial
  >>> for i in range(1024):
  ...     with open(str(i), 'wb') as fh:
  ...         fh.write(str(i))
  $ hg -q commit -A -m 'add a lot of files'
  $ hg st
  $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

Cannot stream clone when server.uncompressed is set

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
  200 Script output follows

  1

#if stream-legacy
  $ hg debugcapabilities http://localhost:$HGPORT
  Main capabilities:
    batch
    branchmap
    $USUAL_BUNDLE2_CAPS$
    changegroupsubset
    compression=zstd,zlib
    getbundle
    httpheader=1024
    httpmediatype=0.1rx,0.1tx,0.2tx
    known
    lookup
    pushkey
    unbundle=HG10GZ,HG10BZ,HG10UN
    unbundlehash
  Bundle2 capabilities:
    HG20
    bookmarks
    changegroup
      01
      02
    digests
      md5
      sha1
      sha512
    error
      abort
      unsupportedcontent
      pushraced
      pushkey
    hgtagsfnodes
    listkeys
    phases
      heads
    pushkey
    remote-changegroup
      http
      https

  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
  200 Script output follows
  content-type: application/mercurial-0.2


  $ f --size body --hexdump --bytes 100
-  body: size=112318
+  body: size=232
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 68 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |h.STREAM2.......|
-  0020: 05 09 04 0c 2d 62 79 74 65 63 6f 75 6e 74 39 38 |....-bytecount98|
-  0030: 37 35 38 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |758filecount1030|
-  0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
-  0050: 6e 63 6f 64 65 20 66 6e 63 61 63 68 65 20 67 65 |ncode fncache ge|
-  0060: 6e 65 72 61 |nera|
+  0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
+  0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
+  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
+  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
+  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
+  0060: 69 73 20 66 |is f|

#endif
#if stream-bundle2
  $ hg debugcapabilities http://localhost:$HGPORT
  Main capabilities:
    batch
    branchmap
    $USUAL_BUNDLE2_CAPS$
    changegroupsubset
    compression=zstd,zlib
    getbundle
    httpheader=1024
    httpmediatype=0.1rx,0.1tx,0.2tx
    known
    lookup
    pushkey
    unbundle=HG10GZ,HG10BZ,HG10UN
    unbundlehash
  Bundle2 capabilities:
    HG20
    bookmarks
    changegroup
      01
      02
    digests
      md5
      sha1
      sha512
    error
      abort
      unsupportedcontent
      pushraced
      pushkey
    hgtagsfnodes
    listkeys
    phases
      heads
    pushkey
    remote-changegroup
      http
      https

  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
  200 Script output follows
  content-type: application/mercurial-0.2


  $ f --size body --hexdump --bytes 100
-  body: size=112318
+  body: size=232
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 68 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |h.STREAM2.......|
-  0020: 05 09 04 0c 2d 62 79 74 65 63 6f 75 6e 74 39 38 |....-bytecount98|
-  0030: 37 35 38 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |758filecount1030|
-  0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
-  0050: 6e 63 6f 64 65 20 66 6e 63 61 63 68 65 20 67 65 |ncode fncache ge|
-  0060: 6e 65 72 61 |nera|
+  0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
+  0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
+  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
+  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
+  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
+  0060: 69 73 20 66 |is f|
163
163
164 #endif
164 #endif
165
165
166 $ killdaemons.py
166 $ killdaemons.py
167 $ cd server
167 $ cd server
168 $ hg serve -p $HGPORT -d --pid-file=hg.pid
168 $ hg serve -p $HGPORT -d --pid-file=hg.pid
169 $ cat hg.pid > $DAEMON_PIDS
169 $ cat hg.pid > $DAEMON_PIDS
170 $ cd ..
170 $ cd ..
171
171
172 Basic clone
172 Basic clone
173
173
174 #if stream-legacy
174 #if stream-legacy
175 $ hg clone --stream -U http://localhost:$HGPORT clone1
175 $ hg clone --stream -U http://localhost:$HGPORT clone1
176 streaming all changes
176 streaming all changes
177 1027 files to transfer, 96.3 KB of data
177 1027 files to transfer, 96.3 KB of data
178 transferred 96.3 KB in * seconds (*/sec) (glob)
178 transferred 96.3 KB in * seconds (*/sec) (glob)
179 searching for changes
179 searching for changes
180 no changes found
180 no changes found
181 #endif
181 #endif
182 #if stream-bundle2
182 #if stream-bundle2
183 $ hg clone --stream -U http://localhost:$HGPORT clone1
183 $ hg clone --stream -U http://localhost:$HGPORT clone1
184 streaming all changes
184 streaming all changes
185 1030 files to transfer, 96.4 KB of data
185 1030 files to transfer, 96.4 KB of data
186 transferred 96.4 KB in * seconds (* */sec) (glob)
186 transferred 96.4 KB in * seconds (* */sec) (glob)
187
187
188 $ ls -1 clone1/.hg/cache
188 $ ls -1 clone1/.hg/cache
189 branch2-served
189 branch2-served
190 rbc-names-v1
190 rbc-names-v1
191 rbc-revs-v1
191 rbc-revs-v1
192 #endif
192 #endif

getbundle requests with stream=1 are uncompressed

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
  200 Script output follows
  content-type: application/mercurial-0.2


  $ f --size --hex --bytes 256 body
  body: size=112318
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: 68 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |h.STREAM2.......|
  0020: 05 09 04 0c 2d 62 79 74 65 63 6f 75 6e 74 39 38 |....-bytecount98|
  0030: 37 35 38 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |758filecount1030|
  0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
  0050: 6e 63 6f 64 65 20 66 6e 63 61 63 68 65 20 67 65 |ncode fncache ge|
  0060: 6e 65 72 61 6c 64 65 6c 74 61 20 72 65 76 6c 6f |neraldelta revlo|
  0070: 67 76 31 20 73 74 6f 72 65 00 00 10 00 73 08 42 |gv1 store....s.B|
  0080: 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 00 |data/0.i........|
  0090: 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 01 |................|
  00a0: ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 87 |.........)c.I.#.|
  00b0: bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 00 |...Vg.g,i..9....|
  00c0: 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 74 |........u0s.Bdat|
  00d0: 61 2f 31 2e 69 00 03 00 01 00 00 00 00 00 00 00 |a/1.i...........|
  00e0: 02 00 00 00 01 00 00 00 00 00 00 00 01 ff ff ff |................|
  00f0: ff ff ff ff ff f9 76 da 1d 0d f2 25 6c de 08 db |......v....%l...|

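The stream2 part header above advertises three mandatory parameters, readable
in the hexdump. An illustrative summary (not actual bundle2 parsing code):

  params = {
      'bytecount': 98758,  # raw store bytes to stream
      'filecount': 1030,   # number of store files in the stream
      'requirements': 'dotencode fncache generaldelta revlogv1 store',
  }
  # 98758 bytes is the ~96.4 KB reported by the stream clones above
  assert round(params['bytecount'] / 1024.0, 1) == 96.4
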
--uncompressed is an alias to --stream

#if stream-legacy
  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*/sec) (glob)
  searching for changes
  no changes found
#endif
#if stream-bundle2
  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
  streaming all changes
  1030 files to transfer, 96.4 KB of data
  transferred 96.4 KB in * seconds (* */sec) (glob)
#endif

Clone with background file closing enabled

#if stream-legacy
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  sending branchmap command
  streaming all changes
  sending stream_out command
  1027 files to transfer, 96.3 KB of data
  starting 4 threads for background file closing
  transferred 96.3 KB in * seconds (*/sec) (glob)
  query 1; heads
  sending batch command
  searching for changes
  all remote heads known locally
  no changes found
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-part: "phase-heads" supported
  bundle2-input-part: total payload size 24
  bundle2-input-bundle: 1 parts total
  checking for updated bookmarks
#endif
#if stream-bundle2
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  query 1; heads
  sending batch command
  streaming all changes
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "stream2" (params: 3 mandatory) supported
  applying stream bundle
  1030 files to transfer, 96.4 KB of data
  starting 4 threads for background file closing
  starting 4 threads for background file closing
  transferred 96.4 KB in * seconds (* */sec) (glob)
  bundle2-input-part: total payload size 112077
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-bundle: 1 parts total
  checking for updated bookmarks
#endif
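
The worker options passed with --config above could equally live in the
client's configuration file; an illustrative hgrc equivalent:

  [worker]
  backgroundclose = true
  backgroundcloseminfilecount = 1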

Cannot stream clone when there are secret changesets

  $ hg -R server phase --force --secret -r tip
  $ hg clone --stream -U http://localhost:$HGPORT secret-denied
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4

  $ killdaemons.py

Streaming of secrets can be overridden by server config

  $ cd server
  $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

303
303
304 #if stream-legacy
304 #if stream-legacy
305 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
305 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
306 streaming all changes
306 streaming all changes
307 1027 files to transfer, 96.3 KB of data
307 1027 files to transfer, 96.3 KB of data
308 transferred 96.3 KB in * seconds (*/sec) (glob)
308 transferred 96.3 KB in * seconds (*/sec) (glob)
309 searching for changes
309 searching for changes
310 no changes found
310 no changes found
311 #endif
311 #endif
312 #if stream-bundle2
312 #if stream-bundle2
313 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
313 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
314 streaming all changes
314 streaming all changes
315 1030 files to transfer, 96.4 KB of data
315 1030 files to transfer, 96.4 KB of data
316 transferred 96.4 KB in * seconds (* */sec) (glob)
316 transferred 96.4 KB in * seconds (* */sec) (glob)
317 #endif
317 #endif
318
318
319 $ killdaemons.py
319 $ killdaemons.py
320
320
321 Verify interaction between preferuncompressed and secret presence
321 Verify interaction between preferuncompressed and secret presence
322
322
323 $ cd server
323 $ cd server
324 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
324 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
325 $ cat hg.pid > $DAEMON_PIDS
325 $ cat hg.pid > $DAEMON_PIDS
326 $ cd ..
326 $ cd ..
327
327
328 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
328 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
329 requesting all changes
329 requesting all changes
330 adding changesets
330 adding changesets
331 adding manifests
331 adding manifests
332 adding file changes
332 adding file changes
333 added 1 changesets with 1 changes to 1 files
333 added 1 changesets with 1 changes to 1 files
334 new changesets 96ee1d7354c4
334 new changesets 96ee1d7354c4
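
Even with server.preferuncompressed=true, the secret changeset forces the
server back to a regular pull-based clone (note the absence of "streaming all
changes" above). The persistent form of the option used here would be:

  [server]
  preferuncompressed = true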

  $ killdaemons.py

Clone not allowed when full bundles disabled and can't serve secrets

  $ cd server
  $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  remote: abort: server has pull-based clones disabled
  abort: pull failed on remote
  (remove --pull if specified or upgrade Mercurial)
  [255]
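
With full bundles disabled and the secret changeset ruling out a stream clone,
the server is left with no way to serve the clone at all, hence the abort. The
persistent form of the option (illustrative):

  [server]
  disablefullbundle = true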

Local stream clone with secrets involved
(This is just a behavior test: with direct access to the repo's files there is
no security boundary anyway, so preventing a clone here is not important.)

  $ hg clone -U --stream server local-secret
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4

Stream clone while repo is changing:

  $ mkdir changing
  $ cd changing

extension for delaying the server process so we can reliably modify the repo
while cloning

  $ cat > delayer.py <<EOF
  > import time
  > from mercurial import extensions, vfs
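  > # wrap vfs file opens: stall briefly when f1's revlog data file is
  > # requested, giving the test a window to commit while the clone runs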
  > def __call__(orig, self, path, *args, **kwargs):
  >     if path == 'data/f1.i':
  >         time.sleep(2)
  >     return orig(self, path, *args, **kwargs)
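  > # install the wrapper so every vfs open in the server goes through it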
  > extensions.wrapfunction(vfs.vfs, '__call__', __call__)
  > EOF

prepare a repo with a small and a big file, to cover both code paths in
emitrevlogdata

  $ hg init repo
  $ touch repo/f1
  $ $TESTDIR/seq.py 50000 > repo/f2
  $ hg -R repo ci -Aqm "0"
  $ hg serve -R repo -p $HGPORT1 -d --pid-file=hg.pid --config extensions.delayer=delayer.py
  $ cat hg.pid >> $DAEMON_PIDS

clone while modifying the repo between stat'ing the file (with the write lock
held) and actually serving the file content

  $ hg clone -q --stream -U http://localhost:$HGPORT1 clone &
  $ sleep 1
  $ echo >> repo/f1
  $ echo >> repo/f2
  $ hg -R repo ci -m "1"
  $ wait
  $ hg -R clone id
  000000000000
  $ cd ..

Stream repository with bookmarks
--------------------------------

(revert introduction of secret changeset)

  $ hg -R server phase --draft 'secret()'

add a bookmark

  $ hg -R server bookmark -r tip some-bookmark

clone it

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*) (glob)
  searching for changes
  no changes found
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1033 files to transfer, 96.6 KB of data
  transferred 96.6 KB in * seconds (* */sec) (glob)
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg -R with-bookmarks bookmarks
  some-bookmark 1:c17445101a72

Stream repository with phases
-----------------------------

Clone as publishing

  $ hg -R server phase -r 'all()'
  0: draft
  1: draft

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*) (glob)
  searching for changes
  no changes found
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1033 files to transfer, 96.6 KB of data
  transferred 96.6 KB in * seconds (* */sec) (glob)
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg -R phase-publish phase -r 'all()'
  0: public
  1: public

Clone as non publishing

  $ cat << EOF >> server/.hg/hgrc
  > [phases]
  > publish = False
  > EOF
  $ killdaemons.py
  $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*) (glob)
  searching for changes
  no changes found
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: public
  1: public
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1034 files to transfer, 96.7 KB of data
  transferred 96.7 KB in * seconds (* */sec) (glob)
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: draft
  1: draft
#endif
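
The phase difference between the two testcases is notable: the legacy stream
protocol apparently does not preserve phase information, so the changesets end
up public even though the server is non-publishing, while the bundle2 stream
(note the extra file: 1034 here vs 1033 in the publishing clones above)
appears to also carry the store's phase data. An illustrative way to poke at
this outside the test, assuming the phase data lives in the store as a
phaseroots file:

  $ ls server/.hg/store/phaseroots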

  $ killdaemons.py