streamclone: add support for cloning non append-only file...
Boris Feld
r35783:56c30b31 default
@@ -1,2234 +1,2235 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params

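# Illustrative examples (not part of exchange.py): how spec strings map to
# the returned 3-tuple. Internal names assume the stock compression engines
# ('gzip' -> 'GZ', 'bzip2' -> 'BZ', 'none' -> 'UN').
#
#   parsebundlespec(repo, 'gzip-v2')
#   # -> ('GZ', '02', {})
#   parsebundlespec(repo, 'none-packed1;requirements=generaldelta')
#   # -> ('UN', 's1', {'requirements': 'generaldelta'})
#   parsebundlespec(repo, 'v2', strict=False)
#   # -> ('BZ', '02', {})   (compression defaults to bzip2)
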
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

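# Summary of the dispatch above (illustrative): the first four bytes of the
# stream select the unbundler.
#
#   'HG10' + 2-byte compression id -> changegroup.cg1unpacker (bundle1)
#   'HG2x', e.g. 'HG20'            -> bundle2.getunbundler    (bundle2)
#   'HGS1'                         -> streamclone.streamcloneapplier (packed1)
#   headerless '\0...' stream      -> fixed up and handled as 'HG10'/'UN'
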
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

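# Illustrative usage (hypothetical file name; assumes the bundle was written
# with `hg bundle --type gzip-v2`):
#
#   with open('changes.hg', 'rb') as fh:
#       spec = getbundlespec(ui, fh)    # -> 'gzip-v2'
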
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    forcebundle1 = False
    # The goal of this config option is to let developers choose the bundle
    # version used during exchange. This is especially handy during tests.
    # The value is a list of bundle versions to pick from; the highest
    # version available should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

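# Illustrative hgrc snippet for the developer config read above; listing only
# 'bundle1' forces the legacy path, while listing 'bundle2' as well disables
# the forcing:
#
#   [devel]
#   legacy.exchange = bundle1
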
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed along with the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exceptions from mandatory pushkey part failures
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no targets to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


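# Illustrative lookup (this mirrors how the bookmark push code consumes the
# map; `book` is a bookmark name):
#
#   okmsg, failmsg = bookmsgmap['update']
#   ui.status(okmsg % book)    # on pushkey success
#   ui.warn(failmsg % book)    # on pushkey failure
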
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

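# Illustrative call (assumes `other` is a peer, e.g. obtained through
# hg.peer(repo, {}, path); the hg module is not imported here):
#
#   pushop = push(repo, other, revs=[repo['tip'].node()])
#   if pushop.cgresult:
#       pass  # the remote accepted our changesets
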
483 # list of steps to perform discovery before push
483 # list of steps to perform discovery before push
484 pushdiscoveryorder = []
484 pushdiscoveryorder = []
485
485
486 # Mapping between step name and function
486 # Mapping between step name and function
487 #
487 #
488 # This exists to help extensions wrap steps if necessary
488 # This exists to help extensions wrap steps if necessary
489 pushdiscoverymapping = {}
489 pushdiscoverymapping = {}
490
490
491 def pushdiscovery(stepname):
491 def pushdiscovery(stepname):
492 """decorator for function performing discovery before push
492 """decorator for function performing discovery before push
493
493
494 The function is added to the step -> function mapping and appended to the
494 The function is added to the step -> function mapping and appended to the
495 list of steps. Beware that decorated function will be added in order (this
495 list of steps. Beware that decorated function will be added in order (this
496 may matter).
496 may matter).
497
497
498 You can only use this decorator for a new step, if you want to wrap a step
498 You can only use this decorator for a new step, if you want to wrap a step
499 from an extension, change the pushdiscovery dictionary directly."""
499 from an extension, change the pushdiscovery dictionary directly."""
500 def dec(func):
500 def dec(func):
501 assert stepname not in pushdiscoverymapping
501 assert stepname not in pushdiscoverymapping
502 pushdiscoverymapping[stepname] = func
502 pushdiscoverymapping[stepname] = func
503 pushdiscoveryorder.append(stepname)
503 pushdiscoveryorder.append(stepname)
504 return func
504 return func
505 return dec
505 return dec
506
506
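# Illustrative extension hook (hypothetical step name): registering an extra
# discovery step, appended after the built-in ones and run by
# _pushdiscovery() below.
#
#   @pushdiscovery('my-step')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running custom push discovery\n')
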
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure case of the changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # then we may be in the issue 3781 case!
        # We drop the phase synchronisation that would normally be done as a
        # courtesy to publish changesets that may be draft on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly roots;
    # XXX we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

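# With a non-publishing remote, the revset template above expands to
# (illustrative):
#
#   heads((%ln::%ln) and public())
#
# i.e. the heads, within the ranges from the remote draft roots to our
# heads, that are already public locally and thus outdated on the remote.
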
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, which can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

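# The eight buckets unpacked from comparebookmarks() above are, roughly:
#   addsrc  - added locally            adddst  - added remotely
#   advsrc  - advanced locally         advdst  - advanced remotely
#   diverge - diverged                 differ  - changed, relation unknown
#   invalid - unknown on both sides    same    - identical on both sides
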
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore is empty --> no obsolete markers,
        # so we can skip the iteration
        if unfi.obsstore:
            # these messages are bound to short names here for the sake of
            # the 80-character line limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If there is at least one obsolete or unstable changeset in
            # missing, then at least one of the missing heads will be
            # obsolete or unstable as well. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

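# Illustrative extension hook (hypothetical part name): idx=0 runs the
# generator before every built-in step, so its part precedes 'changeset'.
# mandatory=False keeps older servers from aborting on the unknown part.
#
#   @b2partsgenerator('my-part', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       bundler.newpart('x-my-part', data='payload', mandatory=False)
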
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

751 @b2partsgenerator('check-bookmarks')
751 @b2partsgenerator('check-bookmarks')
752 def _pushb2checkbookmarks(pushop, bundler):
752 def _pushb2checkbookmarks(pushop, bundler):
753 """insert bookmark move checking"""
753 """insert bookmark move checking"""
754 if not _pushing(pushop) or pushop.force:
754 if not _pushing(pushop) or pushop.force:
755 return
755 return
756 b2caps = bundle2.bundle2caps(pushop.remote)
756 b2caps = bundle2.bundle2caps(pushop.remote)
757 hasbookmarkcheck = 'bookmarks' in b2caps
757 hasbookmarkcheck = 'bookmarks' in b2caps
758 if not (pushop.outbookmarks and hasbookmarkcheck):
758 if not (pushop.outbookmarks and hasbookmarkcheck):
759 return
759 return
760 data = []
760 data = []
761 for book, old, new in pushop.outbookmarks:
761 for book, old, new in pushop.outbookmarks:
762 old = bin(old)
762 old = bin(old)
763 data.append((book, old))
763 data.append((book, old))
764 checkdata = bookmod.binaryencode(data)
764 checkdata = bookmod.binaryencode(data)
765 bundler.newpart('check:bookmarks', data=checkdata)
765 bundler.newpart('check:bookmarks', data=checkdata)
766
766
767 @b2partsgenerator('check-phases')
767 @b2partsgenerator('check-phases')
768 def _pushb2checkphases(pushop, bundler):
768 def _pushb2checkphases(pushop, bundler):
769 """insert phase move checking"""
769 """insert phase move checking"""
770 if not _pushing(pushop) or pushop.force:
770 if not _pushing(pushop) or pushop.force:
771 return
771 return
772 b2caps = bundle2.bundle2caps(pushop.remote)
772 b2caps = bundle2.bundle2caps(pushop.remote)
773 hasphaseheads = 'heads' in b2caps.get('phases', ())
773 hasphaseheads = 'heads' in b2caps.get('phases', ())
774 if pushop.remotephases is not None and hasphaseheads:
774 if pushop.remotephases is not None and hasphaseheads:
775 # check that the remote phase has not changed
    # check that the remote phase has not changed
    checks = [[] for p in phases.allphases]
    checks[phases.public].extend(pushop.remotephases.publicheads)
    checks[phases.draft].extend(pushop.remotephases.draftroots)
    if any(checks):
        for nodes in checks:
            nodes.sort()
        checkdata = phases.binaryencode(checks)
        bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
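        # phases.public is index 0 in allphases, so this records the heads
        # that the remote should now consider public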
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'
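
# Illustrative mapping (added note, not from the original code): a bookmark
# with no previous remote value is an 'export', one whose new value is empty
# is a 'delete', anything else is an 'update':
#
#   _bmaction(None, newnode)    -> 'export'
#   _bmaction(oldnode, '')      -> 'delete'
#   _bmaction(oldnode, newnode) -> 'update'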

def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)
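
# Usage sketch for the part above (an assumption about typical use, not
# taken from this file): a client runs something like
#
#   hg push --pushvars "DEBUG=1"
#
# and, on servers configured to accept pushvars, the variable is expected to
# reach server-side hooks as an HG_USERVAR_DEBUG environment variable.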

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
    except bundle2.AbortFromPart as exc:
        pushop.ui.status(_('remote: %s\n') % exc)
        if exc.hint is not None:
            pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
        raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
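
# Note on the flow above: each @b2partsgenerator function may add parts to
# the bundler and may return a callable. _pushbundle2 collects those
# callables as reply handlers and invokes each one with the bundleoperation
# built from the server's reply, which is how e.g. pushop.cgresult and
# bookmark results get populated.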

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation that would otherwise be done as
        # a courtesy and that could publish, on the remote, changesets that
        # are possibly still draft locally.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fall back to the independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
        # discovery can have set the value from an invalid entry
        if pushop.bkresult is not None:
            pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible,
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset,
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
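
# Minimal usage sketch (assumes util.transactional supplies the context
# manager protocol, closing on success and releasing on error, which is how
# pull() below drives it):
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # created lazily on first call
#       ...                           # apply data under tr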

def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
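
# Calling sketch (illustrative only; `repo` and the peer URL below are
# hypothetical, and hg.peer is assumed to be available from mercurial.hg):
#
#   from mercurial import hg
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = pull(repo, other)          # pull everything
#   if pullop.cgresult == 0:
#       repo.ui.status('no changes found\n')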

# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
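
# Registration sketch (the step name and function below are hypothetical,
# shown only to illustrate the decorator):
#
#   @pulldiscovery('my-extra-data')
#   def _pulldiscoverymyextradata(pullop):
#       # runs during _pulldiscovery(), in registration order
#       pullop.repo.ui.debug('discovering extra data\n')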

def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')
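        # (assumption, based on the surrounding stream-clone work: the
        # streamed store files already carry phase data, so the separate
        # phases step can be skipped)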

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and break a future, useful rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

1600 def _pullphase(pullop):
1601 def _pullphase(pullop):
1601 # Get remote phases data from remote
1602 # Get remote phases data from remote
1602 if 'phases' in pullop.stepsdone:
1603 if 'phases' in pullop.stepsdone:
1603 return
1604 return
1604 remotephases = pullop.remote.listkeys('phases')
1605 remotephases = pullop.remote.listkeys('phases')
1605 _pullapplyphases(pullop, remotephases)
1606 _pullapplyphases(pullop, remotephases)
1606
1607
1607 def _pullapplyphases(pullop, remotephases):
1608 def _pullapplyphases(pullop, remotephases):
1608 """apply phase movement from observed remote state"""
1609 """apply phase movement from observed remote state"""
1609 if 'phases' in pullop.stepsdone:
1610 if 'phases' in pullop.stepsdone:
1610 return
1611 return
1611 pullop.stepsdone.add('phases')
1612 pullop.stepsdone.add('phases')
1612 publishing = bool(remotephases.get('publishing', False))
1613 publishing = bool(remotephases.get('publishing', False))
1613 if remotephases and not publishing:
1614 if remotephases and not publishing:
1614 # remote is new and non-publishing
1615 # remote is new and non-publishing
1615 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1616 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1616 pullop.pulledsubset,
1617 pullop.pulledsubset,
1617 remotephases)
1618 remotephases)
1618 dheads = pullop.pulledsubset
1619 dheads = pullop.pulledsubset
1619 else:
1620 else:
1620 # Remote is old or publishing all common changesets
1621 # Remote is old or publishing all common changesets
1621 # should be seen as public
1622 # should be seen as public
1622 pheads = pullop.pulledsubset
1623 pheads = pullop.pulledsubset
1623 dheads = []
1624 dheads = []
1624 unfi = pullop.repo.unfiltered()
1625 unfi = pullop.repo.unfiltered()
1625 phase = unfi._phasecache.phase
1626 phase = unfi._phasecache.phase
1626 rev = unfi.changelog.nodemap.get
1627 rev = unfi.changelog.nodemap.get
1627 public = phases.public
1628 public = phases.public
1628 draft = phases.draft
1629 draft = phases.draft
1629
1630
1630 # exclude changesets already public locally and update the others
1631 # exclude changesets already public locally and update the others
1631 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1632 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1632 if pheads:
1633 if pheads:
1633 tr = pullop.gettransaction()
1634 tr = pullop.gettransaction()
1634 phases.advanceboundary(pullop.repo, tr, public, pheads)
1635 phases.advanceboundary(pullop.repo, tr, public, pheads)
1635
1636
1636 # exclude changesets already draft locally and update the others
1637 # exclude changesets already draft locally and update the others
1637 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1638 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1638 if dheads:
1639 if dheads:
1639 tr = pullop.gettransaction()
1640 tr = pullop.gettransaction()
1640 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1641 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1641
1642
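# Illustration (added for clarity, not part of the original change): the
# 'phases' listkeys namespace consumed by _pullapplyphases is a str -> str
# mapping. As a hedged sketch, a publishing server typically answers
# {'publishing': 'True'}, while a non-publishing server also lists each
# draft root, e.g. {'<draft-root-hex>': '1'}.
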
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    ``gettransaction`` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, attach to the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

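# Hypothetical usage sketch of the decorator above (extension-defined names,
# not part of this module): a new step registers itself and is later invoked
# in order by getbundlechunks().
#
#   @getbundle2partsgenerator('myextdata')
#   def _getbundlemyextpart(bundler, repo, source, bundlecaps=None,
#                           b2caps=None, **kwargs):
#       if 'myext' in b2caps:  # only if the client advertised support
#           bundler.newpart('myext:data', data=b'payload')
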
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

@getbundle2partsgenerator('stream')
def _getbundlestream(bundler, repo, source, bundlecaps=None,
                     b2caps=None, heads=None, common=None, **kwargs):
    if not kwargs.get('stream', False):
        return
    filecount, bytecount, it = streamclone.generatev2(repo)
    requirements = ' '.join(repo.requirements)
    part = bundler.newpart('stream', data=it)
    part.addparam('bytecount', '%d' % bytecount, mandatory=True)
    part.addparam('filecount', '%d' % filecount, mandatory=True)
    part.addparam('requirements', requirements, mandatory=True)
    part.addparam('version', 'v2', mandatory=True)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

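# Sketch of the 'hashed' form accepted by check_heads (assumes the client
# computed the digest over the same raw 20-byte nodes, sorted, exactly as
# done above):
#
#   heads_hash = hashlib.sha1(''.join(sorted(repo.heads()))).digest()
#   check_heads(repo, ['hashed', heads_hash], 'uploading changes')
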
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

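# Example manifest text accepted by parseclonebundlesmanifest (URL and
# attribute values are illustrative only):
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would parse to a single entry whose BUNDLESPEC is also split into
# COMPRESSION='gzip' and VERSION='v2' for easier client-side filtering.
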
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

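# Example preferences consumed by sortclonebundleentries (hgrc sketch with
# illustrative values):
#
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# Earlier preferences win; entries that tie on every preference keep their
# manifest order.
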
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False

@@ -1,541 +1,595 @@
# streamclone.py - producing and consuming streaming repository data
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import contextlib
import os
import struct
import tempfile

from .i18n import _
from . import (
    branchmap,
    error,
    phases,
    store,
    util,
)

def canperformstreamclone(pullop, bundle2=False):
    """Whether it is possible to perform a streaming clone as part of pull.

    ``bundle2`` will cause the function to consider stream clone through
    bundle2 and only through bundle2.

    Returns a tuple of (supported, requirements). ``supported`` is True if
    streaming clone is supported and False otherwise. ``requirements`` is
    a set of repo requirements from the remote, or ``None`` if stream clone
    isn't supported.
    """
    repo = pullop.repo
    remote = pullop.remote

    bundle2supported = False
    if pullop.canusebundle2:
        if 'v2' in pullop.remotebundle2caps.get('stream', []):
            bundle2supported = True
        # else
        # Server doesn't support bundle2 stream clone or doesn't support
        # the versions we support. Fall back and possibly allow legacy.

    # Ensures legacy code path uses available bundle2.
    if bundle2supported and not bundle2:
        return False, None
    # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
    elif bundle2 and not bundle2supported:
        return False, None

    # Streaming clone only works on empty repositories.
    if len(repo):
        return False, None

    # Streaming clone only works if all data is being requested.
    if pullop.heads:
        return False, None

    streamrequested = pullop.streamclonerequested

    # If we don't have a preference, let the server decide for us. This
    # likely only comes into play in LANs.
    if streamrequested is None:
        # The server can advertise whether to prefer streaming clone.
        streamrequested = remote.capable('stream-preferred')

    if not streamrequested:
        return False, None

    # In order for stream clone to work, the client has to support all the
    # requirements advertised by the server.
    #
    # The server advertises its requirements via the "stream" and "streamreqs"
    # capability. "stream" (a value-less capability) is advertised if and only
    # if the only requirement is "revlogv1." Else, the "streamreqs" capability
    # is advertised and contains a comma-delimited list of requirements.
    requirements = set()
    if remote.capable('stream'):
        requirements.add('revlogv1')
    else:
        streamreqs = remote.capable('streamreqs')
        # This is weird and shouldn't happen with modern servers.
        if not streamreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but server has them '
                'disabled\n'))
            return False, None

        streamreqs = set(streamreqs.split(','))
        # Server requires something we don't support. Bail.
        missingreqs = streamreqs - repo.supportedformats
        if missingreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but client is missing '
                'requirements: %s\n') % ', '.join(sorted(missingreqs)))
            pullop.repo.ui.warn(
                _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                  'for more information)\n'))
            return False, None
        requirements = streamreqs

    return True, requirements

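# Capability forms examined above, with illustrative values: a server whose
# only format requirement is revlogv1 advertises the bare 'stream'
# capability; otherwise it advertises something like
#
#   streamreqs=generaldelta,revlogv1
#
# and the clone is refused if any listed requirement is missing from
# repo.supportedformats.
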
def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    supported, requirements = canperformstreamclone(pullop)

    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable('branchmap'):
        rbranchmap = remote.branchmap()

    repo.ui.status(_('streaming all changes\n'))

    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)
    if resp == 1:
        raise error.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_('the server sent an unknown error code'))

    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = requirements | (
            repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if rbranchmap:
            branchmap.replacecache(repo, rbranchmap)

        repo.invalidate()

def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret changesets.
    # So don't allow this by default.
    secret = phases.hassecret(repo)
    if secret:
        return repo.ui.configbool('server', 'uncompressedallowsecret')

    return True

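# Server-side configuration read by allowservergeneration (hgrc sketch):
#
#   [server]
#   uncompressed = True               # permit stream clone generation
#   uncompressedallowsecret = False   # still refuse when secret csets exist
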
178 # This is it's own function so extensions can override it.
181 # This is it's own function so extensions can override it.
179 def _walkstreamfiles(repo):
182 def _walkstreamfiles(repo):
180 return repo.store.walk()
183 return repo.store.walk()
181
184
def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of one entry per file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))

    svfs = repo.svfs
    debugflag = repo.ui.debugflag

    def emitrevlogdata():
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            # auditing at this stage is both pointless (paths are already
            # trusted by the local repo) and expensive
            with svfs(name, 'rb', auditpath=False) as fp:
                if size <= 65536:
                    yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(fp, limit=size):
                        yield chunk

    return len(entries), total_bytes, emitrevlogdata()

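Since each entry is just a '\0'-delimited header line followed by raw bytes, a v1 payload can be walked with nothing but readline() and sized reads. A minimal reader sketch (it skips the store.decodedir() name decoding that consumev1() performs):

    def readv1entries(fp, filecount):
        """Yield (name, data) pairs from a raw v1 payload (sketch)."""
        for _i in range(filecount):
            header = fp.readline()                # '<name>\0<size>\n'
            name, size = header.rstrip('\n').split('\0', 1)
            yield name, fp.read(int(size))
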
def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with 2 header lines. The
    first line indicates overall success. The 2nd contains the file count and
    byte size of payload.

    The success line contains "0" for success, "1" for stream generation not
    allowed, and "2" for error locking the repository (possibly indicating
    a permissions error for the server process).
    """
    if not allowservergeneration(repo):
        yield '1\n'
        return

    try:
        filecount, bytecount, it = generatev1(repo)
    except error.LockError:
        yield '2\n'
        return

    # Indicates successful response.
    yield '0\n'
    yield '%d %d\n' % (filecount, bytecount)
    for chunk in it:
        yield chunk

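The consuming side has to strip those two header lines before handing the rest to consumev1(); a hedged sketch of that handshake (the error strings are illustrative, not the exact ones Mercurial prints):

    def readv1wireheader(fp):
        """Parse the status and size lines of a v1 wire stream (sketch)."""
        status = fp.readline().strip()
        if status == '1':
            raise RuntimeError('stream generation not allowed by server')
        if status == '2':
            raise RuntimeError('server could not lock its repository')
        if status != '0':
            raise RuntimeError('unexpected status line: %r' % status)
        filecount, bytecount = map(int, fp.readline().split(' ', 1))
        return filecount, bytecount
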
def generatebundlev1(repo, compression='UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes are a 16-bit big endian unsigned short declaring the
    length of the requirements string, including a trailing \0. The following
    N bytes are the requirements string, which is ASCII containing a
    comma-delimited list of repo requirements that are needed to support the
    data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != 'UN':
        raise ValueError('we do not support the compression argument yet')

    requirements = repo.requirements & repo.supportedformats
    requires = ','.join(sorted(requirements))

    def gen():
        yield 'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(_('writing %d bytes for %d files\n') %
                       (bytecount, filecount))

        yield struct.pack('>QQ', filecount, bytecount)
        yield struct.pack('>H', len(requires) + 1)
        yield requires + '\0'

        # This is where we'll add compression in the future.
        assert compression == 'UN'

        seen = 0
        repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes'))

        for chunk in it:
            seen += len(chunk)
            repo.ui.progress(_('bundle'), seen, total=bytecount,
                             unit=_('bytes'))
            yield chunk

        repo.ui.progress(_('bundle'), None)

    return requirements, gen()

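The fixed-size part of that header can be exercised with struct alone; a round-trip sketch using made-up counts:

    import struct

    requires = 'generaldelta,revlogv1'
    header = ''.join([
        'HGS1',                                # 4-byte magic
        'UN',                                  # 2-byte compression type
        struct.pack('>QQ', 1027, 98566),       # file count, byte count (made up)
        struct.pack('>H', len(requires) + 1),  # requirements length, incl. '\0'
        requires + '\0',
    ])

    # bytes 6..21 hold the two 64-bit big endian integers
    filecount, bytecount = struct.unpack('>QQ', header[6:22])
    assert (filecount, bytecount) == (1027, 98566)
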
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction('clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                for i in xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if repo.ui.debugflag:
                        repo.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            handled_bytes += len(chunk)
                            repo.ui.progress(_('clone'), handled_bytes,
                                             total=bytecount, unit=_('bytes'))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))

def readbundle1header(fp):
    compression = fp.read(2)
    if compression != 'UN':
        raise error.Abort(_('only uncompressed stream clone bundles are '
                            'supported; got %s') % compression)

    filecount, bytecount = struct.unpack('>QQ', fp.read(16))
    requireslen = struct.unpack('>H', fp.read(2))[0]
    requires = fp.read(requireslen)

    if not requires.endswith('\0'):
        raise error.Abort(_('malformed stream clone bundle: '
                            'requirements not properly encoded'))

    requirements = set(requires.rstrip('\0').split(','))

    return filecount, bytecount, requirements

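applybundlev1() below assumes its caller already consumed and validated the 4-byte magic, so reading a bundle from disk looks roughly like this (the file name is hypothetical):

    with open('clone.hgs1', 'rb') as fp:   # hypothetical bundle file
        if fp.read(4) != 'HGS1':
            raise ValueError('not a stream clone bundle v1')
        # the handle now sits at the 2-byte compression identifier
        filecount, bytecount, requirements = readbundle1header(fp)
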
def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    if len(repo):
        raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                            'repo'))

    filecount, bytecount, requirements = readbundle1header(fp)
    missingreqs = requirements - repo.supportedformats
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev1(repo, fp, filecount, bytecount)

class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.

    We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
    readers to perform bundle type-specific functionality.
    """
    def __init__(self, fh):
        self._fh = fh

    def apply(self, repo):
        return applybundlev1(repo, self._fh)

+# type of file to stream
+_fileappend = 0 # append only file
+_filefull = 1 # full snapshot file
+
+# This is its own function so extensions can override it.
+def _walkstreamfullstorefiles(repo):
+    """list snapshot files from the store"""
+    fnames = []
+    if not repo.publishing():
+        fnames.append('phaseroots')
+    return fnames
+
+def _filterfull(entry, copy, vfs):
+    """actually copy the snapshot files"""
+    name, ftype, data = entry
+    if ftype != _filefull:
+        return entry
+    return (name, ftype, copy(vfs.join(name)))
+
+@contextlib.contextmanager
+def maketempcopies():
+    """return a function to temporarily copy a file"""
+    files = []
+    try:
+        def copy(src):
+            fd, dst = tempfile.mkstemp()
+            os.close(fd)
+            files.append(dst)
+            util.copyfiles(src, dst, hardlink=True)
+            return dst
+        yield copy
+    finally:
+        for tmp in files:
+            util.tryunlink(tmp)

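These added helpers are the core of the "non append-only file" support: a full-snapshot file such as phaseroots is hardlink-copied to a temporary file while the repository lock is held, and the stream is later fed from that stable copy. A usage sketch mirroring what _emit() below does (the entry list is invented; 'repo' is as in the surrounding code):

    entries = [('phaseroots', _filefull, None)]  # volatile file, size unknown
    with maketempcopies() as copy:
        # while locked: swap each full-snapshot entry for a stable temp copy
        entries = [_filterfull(e, copy, repo.svfs) for e in entries]
        # ... the lock can now be released; data is read from the copies ...
    # every temporary copy is unlinked when the context exits
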
def _emit(repo, entries, totalfilesize):
    """actually emit the stream bundle"""
+    vfs = repo.svfs
    progress = repo.ui.progress
    progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
-    vfs = repo.svfs
-    try:
-        seen = 0
-        for name, size in entries:
-            yield util.uvarintencode(len(name))
-            fp = vfs(name)
-            try:
-                yield util.uvarintencode(size)
-                yield name
-                if size <= 65536:
-                    chunks = (fp.read(size),)
-                else:
-                    chunks = util.filechunkiter(fp, limit=size)
-                for chunk in chunks:
-                    seen += len(chunk)
-                    progress(_('bundle'), seen, total=totalfilesize,
-                             unit=_('bytes'))
-                    yield chunk
-            finally:
-                fp.close()
-    finally:
-        progress(_('bundle'), None)
+    with maketempcopies() as copy:
+        try:
+            # copy is delayed until we are in the try
+            entries = [_filterfull(e, copy, vfs) for e in entries]
+            yield None # this releases the lock on the repository
+            seen = 0
+
+            for name, ftype, data in entries:
+                yield util.uvarintencode(len(name))
+                if ftype == _fileappend:
+                    fp = vfs(name)
+                    size = data
+                elif ftype == _filefull:
+                    fp = open(data, 'rb')
+                    size = util.fstat(fp).st_size
+                try:
+                    yield util.uvarintencode(size)
+                    yield name
+                    if size <= 65536:
+                        chunks = (fp.read(size),)
+                    else:
+                        chunks = util.filechunkiter(fp, limit=size)
+                    for chunk in chunks:
+                        seen += len(chunk)
+                        progress(_('bundle'), seen, total=totalfilesize,
+                                 unit=_('bytes'))
+                        yield chunk
+                finally:
+                    fp.close()
+        finally:
+            progress(_('bundle'), None)

def generatev2(repo):
    """Emit content for version 2 of a streaming clone.

    The data stream consists of the following entries:
    1) A varint containing the length of the filename
    2) A varint containing the length of file data
    3) N bytes containing the filename (the internal, store-agnostic form)
    4) N bytes containing the file data

    Returns a 3-tuple of (file count, file size, data iterator).
    """

    with repo.lock():

        entries = []
        totalfilesize = 0

        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
-                entries.append((name, size))
+                entries.append((name, _fileappend, size))
                totalfilesize += size
+        for name in _walkstreamfullstorefiles(repo):
+            if repo.svfs.exists(name):
+                totalfilesize += repo.svfs.lstat(name).st_size
+                entries.append((name, _filefull, None))

        chunks = _emit(repo, entries, totalfilesize)
+        first = next(chunks)
+        assert first is None

    return len(entries), totalfilesize, chunks

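Both v2 endpoints lean on util.uvarintencode()/util.uvarintdecodestream() for the length framing. A self-contained sketch of that framing, assuming the usual LEB128-style unsigned varint (7 payload bits per byte, high bit set on continuation bytes); the helper names here are stand-ins, not the real implementations:

    import io

    def uvarint_encode(value):
        out = ''
        while True:
            byte = value & 0x7f
            value >>= 7
            if value:
                out += chr(byte | 0x80)
            else:
                return out + chr(byte)

    def uvarint_decode_stream(fp):
        result = shift = 0
        while True:
            byte = ord(fp.read(1))
            result |= (byte & 0x7f) << shift
            if not byte & 0x80:
                return result
            shift += 7

    # round-trip a v2 entry header: name length, then data length
    buf = io.BytesIO(uvarint_encode(9) + uvarint_encode(98566))
    assert uvarint_decode_stream(buf) == 9
    assert uvarint_decode_stream(buf) == 98566
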
def consumev2(repo, fp, filecount, filesize):
    """Apply the contents from a version 2 streaming clone.

    Data is read from an object that only needs to provide a ``read(size)``
    method.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(filesize)))

        start = util.timer()
        handledbytes = 0
        progress = repo.ui.progress

        progress(_('clone'), handledbytes, total=filesize, unit=_('bytes'))

        vfs = repo.svfs

        with repo.transaction('clone'):
            with vfs.backgroundclosing(repo.ui):
                for i in range(filecount):
                    namelen = util.uvarintdecodestream(fp)
                    datalen = util.uvarintdecodestream(fp)

                    name = fp.read(namelen)

                    if repo.ui.debugflag:
                        repo.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(datalen)))

                    with vfs(name, 'w') as ofp:
                        for chunk in util.filechunkiter(fp, limit=datalen):
                            handledbytes += len(chunk)
                            progress(_('clone'), handledbytes, total=filesize,
                                     unit=_('bytes'))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(handledbytes), elapsed,
                        util.bytecount(handledbytes / elapsed)))

def applybundlev2(repo, fp, filecount, filesize, requirements):
    missingreqs = [r for r in requirements if r not in repo.supported]
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev2(repo, fp, filecount, filesize)

@@ -1,332 +1,330 @@
#require serve

#testcases stream-legacy stream-bundle2

#if stream-bundle2
  $ cat << EOF >> $HGRCPATH
  > [experimental]
  > bundle2.stream = yes
  > EOF
#endif

Initialize repository
the status call is to check for issue5130

  $ hg init server
  $ cd server
  $ touch foo
  $ hg -q commit -A -m initial
  >>> for i in range(1024):
  ...     with open(str(i), 'wb') as fh:
  ...         fh.write(str(i))
  $ hg -q commit -A -m 'add a lot of files'
  $ hg st
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS
  $ cd ..

Basic clone

#if stream-legacy
  $ hg clone --stream -U http://localhost:$HGPORT clone1
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*/sec) (glob)
  searching for changes
  no changes found
#endif
#if stream-bundle2
  $ hg clone --stream -U http://localhost:$HGPORT clone1
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (* */sec) (glob)
#endif

--uncompressed is an alias to --stream

#if stream-legacy
  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*/sec) (glob)
  searching for changes
  no changes found
#endif
#if stream-bundle2
  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (* */sec) (glob)
#endif

Clone with background file closing enabled

#if stream-legacy
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  sending branchmap command
  streaming all changes
  sending stream_out command
  1027 files to transfer, 96.3 KB of data
  starting 4 threads for background file closing
  transferred 96.3 KB in * seconds (*/sec) (glob)
  query 1; heads
  sending batch command
  searching for changes
  all remote heads known locally
  no changes found
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-part: "phase-heads" supported
  bundle2-input-part: total payload size 24
  bundle2-input-bundle: 1 parts total
  checking for updated bookmarks
#endif
#if stream-bundle2
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  query 1; heads
  sending batch command
  streaming all changes
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "stream" (params: 4 mandatory) supported
  applying stream bundle
  1027 files to transfer, 96.3 KB of data
  starting 4 threads for background file closing
  transferred 96.3 KB in * seconds (* */sec) (glob)
  bundle2-input-part: total payload size 110887
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 2 parts total
+  bundle2-input-bundle: 1 parts total
  checking for updated bookmarks
#endif

Cannot stream clone when there are secret changesets

  $ hg -R server phase --force --secret -r tip
  $ hg clone --stream -U http://localhost:$HGPORT secret-denied
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4

  $ killdaemons.py

Streaming of secrets can be overridden by server config

  $ cd server
  $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

#if stream-legacy
  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*/sec) (glob)
  searching for changes
  no changes found
#endif
#if stream-bundle2
  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (* */sec) (glob)
#endif

  $ killdaemons.py

Verify interaction between preferuncompressed and secret presence

  $ cd server
  $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4

  $ killdaemons.py

Clone not allowed when full bundles disabled and can't serve secrets

  $ cd server
  $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  remote: abort: server has pull-based clones disabled
  abort: pull failed on remote
  (remove --pull if specified or upgrade Mercurial)
  [255]

Local stream clone with secrets involved
(This is just a test over behavior: if you have access to the repo's files,
there is no security so it isn't important to prevent a clone here.)

  $ hg clone -U --stream server local-secret
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4

Stream clone while repo is changing:

  $ mkdir changing
  $ cd changing

extension for delaying the server process so we can reliably modify the repo
while cloning

  $ cat > delayer.py <<EOF
  > import time
  > from mercurial import extensions, vfs
  > def __call__(orig, self, path, *args, **kwargs):
  >     if path == 'data/f1.i':
  >         time.sleep(2)
  >     return orig(self, path, *args, **kwargs)
  > extensions.wrapfunction(vfs.vfs, '__call__', __call__)
  > EOF

prepare repo with small and big file to cover both code paths in emitrevlogdata

  $ hg init repo
  $ touch repo/f1
  $ $TESTDIR/seq.py 50000 > repo/f2
  $ hg -R repo ci -Aqm "0"
  $ hg serve -R repo -p $HGPORT1 -d --pid-file=hg.pid --config extensions.delayer=delayer.py
  $ cat hg.pid >> $DAEMON_PIDS

clone while modifying the repo between stating file with write lock and
actually serving file content

  $ hg clone -q --stream -U http://localhost:$HGPORT1 clone &
  $ sleep 1
  $ echo >> repo/f1
  $ echo >> repo/f2
  $ hg -R repo ci -m "1"
  $ wait
  $ hg -R clone id
  000000000000
  $ cd ..

Stream repository with bookmarks
--------------------------------

(revert introduction of secret changeset)

  $ hg -R server phase --draft 'secret()'

add a bookmark

  $ hg -R server bookmark -r tip some-bookmark

clone it

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*) (glob)
  searching for changes
  no changes found
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (* */sec) (glob)
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg -R with-bookmarks bookmarks
     some-bookmark             1:c17445101a72

Stream repository with phases
-----------------------------

Clone as publishing

  $ hg -R server phase -r 'all()'
  0: draft
  1: draft

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*) (glob)
  searching for changes
  no changes found
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (* */sec) (glob)
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg -R phase-publish phase -r 'all()'
  0: public
  1: public

Clone as non publishing

  $ cat << EOF >> server/.hg/hgrc
  > [phases]
  > publish = False
  > EOF
  $ killdaemons.py
  $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1027 files to transfer, 96.3 KB of data
  transferred 96.3 KB in * seconds (*) (glob)
  searching for changes
  no changes found
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: public
  1: public
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1028 files to transfer, 96.4 KB of data
+  transferred 96.4 KB in * seconds (* */sec) (glob)
  updating to branch default
  1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
-  0: public
-  1: public
+  0: draft
+  1: draft
#endif

  $ killdaemons.py