exchange: support declaring pull depth...
Gregory Szorc
r40367:ac59de55 default
@@ -1,2650 +1,2657 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from .thirdparty import (
    attr,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    exchangev2,
    lock as lockmod,
    logexchange,
    narrowspec,
    obsolete,
    phases,
    pushkey,
    pycompat,
    repository,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)
from .utils import (
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

_NARROWACL_SECTION = 'narrowhgacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Maps bundle version to content options, used to choose which parts to bundle
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

@attr.s
class bundlespec(object):
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()

def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)

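# Illustrative sketch (hypothetical values; assumes a repo whose requirements
# include 'generaldelta' and an available zstd engine):
#
#   parsebundlespec(repo, 'gzip-v2')
#       -> bundlespec with version 'v2', wireversion '02', params {}
#   parsebundlespec(repo, 'zstd', strict=False)
#       -> defaults to version 'v2', since modern engines require it
#   parsebundlespec(repo, 'v2')
#       -> raises InvalidBundleSpecification (strict mode needs compression)
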
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

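# For reference, the 4-byte header read above dispatches as follows (a sketch
# derived from the branches of readbundle(); compression codes abbreviated):
#
#   'HG10' + 2-byte compression ('UN', 'GZ', 'BZ') -> changegroup.cg1unpacker
#   'HG2x' (any version starting with '2')         -> bundle2.getunbundler
#   'HGS1'                                         -> streamclone.streamcloneapplier
#   leading '\0' (headerless stream)               -> fixed up and treated as
#                                                     uncompressed HG10
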
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part must be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

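# Hypothetical usage sketch (the file handle name is assumed):
#
#   with open(bundlepath, 'rb') as fh:
#       spec = getbundlespec(ui, fh)
#   # e.g. 'gzip-v1', 'zstd-v2', or a 'none-packed1;requirements=...' form
#
# i.e. roughly the inverse of parsebundlespec() above.
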
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to pick from; the highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

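# For example, a test might force the legacy format via an hgrc snippet
# (illustrative; these are the values read by the configlist() call above):
#
#   [devel]
#   legacy.exchange = bundle1
#
# Listing 'bundle2' as well (or instead) lets bundle2 win, since the highest
# listed version is preferred.
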
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have already been performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no revs targeted for push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ((missingheads and ::commonheads)
        #               + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = ('cannot lock source repository: %s\n'
               % stringutil.forcebytestr(err))
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop

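# Minimal caller sketch (hypothetical; assumes an already-opened peer):
#
#   pushop = push(repo, remote, revs=[repo['tip'].node()])
#   if pushop.cgresult is None:
#       repo.ui.status('nothing to push\n')
#
# The returned pushoperation exposes cgresult/bkresult as documented above.
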
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step. If you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

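# A hypothetical extension would add its own discovery step like so (the
# step name and helper below are made up):
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.mydata = _computemydata(pushop.repo)
#
# Wrapping an existing step instead means replacing its entry in
# pushdiscoverymapping directly, as noted above.
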
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3781 case!
        # We drop the phase synchronisation that would otherwise be done as
        # a courtesy, which could publish on the remote changesets that are
        # still draft locally.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that this revset breaks if droots is not strictly a set of
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, which can be quite expensive on big repos.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmarks to push based on the comparison with the remote

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are defined here to stay within the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push, and there is at least one obsolete or
            # unstable changeset in missing, then at least one of the
            # missing heads will be obsolete or unstable. So checking heads
            # only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps. If you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

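# A hypothetical extension part generator follows the same shape as the
# built-in generators below (part and step names are made up):
#
#   @b2partsgenerator('my-part')
#   def _pushb2mypart(pushop, bundler):
#       if 'my-part' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('my-part')
#       bundler.newpart('my-part', data=somedata)
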
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

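# Illustrative sketch (invented values, not from the original file) of the
# changegroup version negotiation above: the client intersects the versions
# advertised by the remote with what it can produce and picks the highest,
# keeping the '01' default when the remote advertises nothing:
#
#     cgversions = ['01', '02']       # from remote bundle2 capabilities
#     supported = {'01', '02', '03'}  # changegroup.supportedoutgoingversions(repo)
#     version = max(v for v in cgversions if v in supported)  # -> '02'
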
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

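# Illustrative examples (hypothetical nodes, not in the original source);
# an empty node means the bookmark is absent on that side:
#
#     _bmaction('', 'a1b2...')        -> 'export'  (new bookmark on the remote)
#     _bmaction('a1b2...', '')        -> 'delete'  (bookmark removed)
#     _bmaction('a1b2...', 'c3d4...') -> 'update'
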
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

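# Illustrative sketch, not part of the original module: given
# `hg push --pushvars "DEBUG=1" --pushvars "REASON="`, pushop.pushvars is
# ['DEBUG=1', 'REASON='], the loop above builds
# shellvars == {'DEBUG': '1', 'REASON': ''}, and each pair is attached as an
# advisory (mandatory=False) parameter of the 'pushvars' part; server-side
# hooks then see them as $HG_USERVAR_DEBUG and $HG_USERVAR_REASON.
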
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # we may be in the issue 3871 case!
        # We drop the possible phase synchronisation done as a courtesy to
        # publish changesets that are draft locally but public on the
        # remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None, depth=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()

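# Example return values (illustrative, with made-up nodes): pushkey
# namespaces map string keys to string values, e.g.
#
#     listkeys(remote, 'phases')    -> {'publishing': 'True'}
#     listkeys(remote, 'bookmarks') -> {'@': '9c2cf8...'}
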
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

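# Illustrative walk-through (invented heads, not in the original file) of one
# iteration of the loop above when the server inlined a partial pull bundle:
#
#     old_heads = {A}                 # local heads before _pullbundle2()
#     # the partial bundle adds B and C (C descends from A)
#     headsofdiff({B, C}, {A})        # -> {B, C}, the newly obtained heads
#     pullop.common = headsofunion({B, C}, pullop.common)
#     pullop.rheads = set(pullop.rheads) - pullop.common   # still missing
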
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None,
         depth=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           depth=depth,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable('command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to
            # run before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop

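# Illustrative usage sketch, not part of the original module; ``peer`` and
# ``node`` are assumed to exist. Whether ``depth`` is honored depends on the
# wire protocol path taken (here, only pullop.depth is recorded; the modern
# 'command-changesetdata' path is what can consume it):
#
#     from mercurial import exchange
#     pullop = exchange.pull(repo, peer, heads=[node], depth=100)
#     if pullop.cgresult:
#         repo.ui.status('pulled shallow history\n')
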
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

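# Illustrative sketch (hypothetical extension code, not in this module):
# registering a new discovery step, and wrapping an existing one through the
# mapping as the docstring above suggests:
#
#     @pulldiscovery('mystep')
#     def _mystep(pullop):
#         pullop.repo.ui.debug('running custom discovery\n')
#
#     origstep = pulldiscoverymapping['changegroup']
#     def wrappedstep(pullop):
#         origstep(pullop)   # extra behaviour would go here
#     pulldiscoverymapping['changegroup'] = wrappedstep
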
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in the bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological number of round
        # trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record['node']
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

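# A sketch (not from the original source) of what kwargs can look like by the
# time the 'getbundle' command is issued above, assuming a modern server with
# binary phase and bookmark support; the node values are hypothetical:
#
#     {'bundlecaps': {'HG20', 'bundle2=<urlquoted caps blob>'},
#      'common': ['<20-byte node>'],
#      'heads': ['<20-byte node>'],
#      'cg': True,
#      'phases': True,
#      'bookmarks': True}
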
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

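# A minimal sketch of how an extension might use the hook above, via
# mercurial.extensions.wrapfunction; the wrapper and the extra getbundle
# argument are hypothetical:
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs['example-arg'] = True  # hypothetical extra argument
#         return orig(pullop, kwargs)
#
#     def extsetup(ui):
#         extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                                 _extraprepare)
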
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing, and don't break a future useful rollback
    # call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

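# A sketch of the 'phases' listkeys payload consumed above. A non-publishing
# server advertises its draft roots as hex node -> phase number (the node
# below is hypothetical), while a publishing server adds 'publishing': 'True':
#
#     {'d047485b3896813b2a624e86201983520f8d83ad': '1'}
#
# Note that bool(remotephases.get('publishing', False)) relies on the key
# being absent for non-publishing servers: any non-empty string is truthy.
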
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

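# A sketch of the 'obsolete' listkeys namespace decoded above: the remote
# ships markers as base85-encoded binary blobs under 'dump0', 'dump1', ...
# (key prefix from the loop above; blob contents hypothetical):
#
#     {'dump0': '<base85-encoded obsstore data>',
#      'dump1': '<base85-encoded obsstore data>'}
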
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args

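# A sketch of the server-side configuration read above; the section and key
# names come from the code, the user and path values are hypothetical:
#
#     [narrowhgacl]
#     default.includes = public
#     alice.includes = public, team-a
#     alice.excludes = team-a/secrets
#
# A bare '*' entry grants the whole repository (it is rewritten to 'path:.'
# above); every other entry is prefixed with 'path:'.
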
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
          need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
          the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
          narrowchangegroup to produce ellipsis nodes with the
          correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

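# A worked example of the depth bookkeeping above, for a hypothetical linear
# history 0 -> 1 -> 2 -> 3 where 3 is the only head and depth=2: revdepth
# ends up as {3: 0, 2: 1, 1: 2, 0: 3}, so revisions 1, 2 and 3 pass the
# shallow_enough test and may become full nodes, while revision 0 can only
# be emitted as an ellipsis root.
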
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 getbundle; order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

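# A minimal sketch of registering an extra part generator through the
# decorator above; the part name and payload are hypothetical:
#
#     @getbundle2partsgenerator('example:ping')
#     def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         """add a hypothetical example part to the requested bundle"""
#         if 'example:ping' in b2caps:
#             bundler.newpart('example:ping', data=b'pong', mandatory=False)
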
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

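# A small usage sketch for the function above, e.g. from code that wants to
# write a bundle to disk (the output file name is hypothetical):
#
#     info, chunks = getbundlechunks(repo, 'serve', heads=None, common=None,
#                                    bundlecaps=caps20to10(repo, 'server'))
#     with open('out.hg', 'wb') as fh:
#         for chunk in chunks:
#             fh.write(chunk)
#
# info carries metadata such as 'bundleversion' and, for bundle2,
# 'prefercompressed'.
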
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        filematcher = narrowspec.match(repo.root, include=include,
                                       exclude=exclude)
    else:
        filematcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps,
                                      filematcher=filematcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    if kwargs.get(r'narrow', False) and (include or exclude):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data into a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

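# A sketch of how a client builds the 'hashed' form accepted above, mirroring
# the digest computed in check_heads (variable names hypothetical):
#
#     expected = hashlib.sha1(''.join(sorted(observed_heads))).digest()
#     their_heads = ['hashed', expected]
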
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

2461 def parseclonebundlesmanifest(repo, s):
2468 def parseclonebundlesmanifest(repo, s):
2462 """Parses the raw text of a clone bundles manifest.
2469 """Parses the raw text of a clone bundles manifest.
2463
2470
2464 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2471 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2465 to the URL and other keys are the attributes for the entry.
2472 to the URL and other keys are the attributes for the entry.
2466 """
2473 """
2467 m = []
2474 m = []
2468 for line in s.splitlines():
2475 for line in s.splitlines():
2469 fields = line.split()
2476 fields = line.split()
2470 if not fields:
2477 if not fields:
2471 continue
2478 continue
2472 attrs = {'URL': fields[0]}
2479 attrs = {'URL': fields[0]}
2473 for rawattr in fields[1:]:
2480 for rawattr in fields[1:]:
2474 key, value = rawattr.split('=', 1)
2481 key, value = rawattr.split('=', 1)
2475 key = urlreq.unquote(key)
2482 key = urlreq.unquote(key)
2476 value = urlreq.unquote(value)
2483 value = urlreq.unquote(value)
2477 attrs[key] = value
2484 attrs[key] = value
2478
2485
2479 # Parse BUNDLESPEC into components. This makes client-side
2486 # Parse BUNDLESPEC into components. This makes client-side
2480 # preferences easier to specify since you can prefer a single
2487 # preferences easier to specify since you can prefer a single
2481 # component of the BUNDLESPEC.
2488 # component of the BUNDLESPEC.
2482 if key == 'BUNDLESPEC':
2489 if key == 'BUNDLESPEC':
2483 try:
2490 try:
2484 bundlespec = parsebundlespec(repo, value)
2491 bundlespec = parsebundlespec(repo, value)
2485 attrs['COMPRESSION'] = bundlespec.compression
2492 attrs['COMPRESSION'] = bundlespec.compression
2486 attrs['VERSION'] = bundlespec.version
2493 attrs['VERSION'] = bundlespec.version
2487 except error.InvalidBundleSpecification:
2494 except error.InvalidBundleSpecification:
2488 pass
2495 pass
2489 except error.UnsupportedBundleSpecification:
2496 except error.UnsupportedBundleSpecification:
2490 pass
2497 pass
2491
2498
2492 m.append(attrs)
2499 m.append(attrs)
2493
2500
2494 return m
2501 return m
2495
2502
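# Editor's sketch (not part of this changeset): for a hypothetical one-line
# manifest, the parser above yields one attrs dict per manifest line, with a
# valid BUNDLESPEC additionally broken out into COMPRESSION/VERSION:
#
#   >>> parseclonebundlesmanifest(repo,
#   ...     b'https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true')
#   [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#     'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}]
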
def isstreamclonespec(bundlespec):
    # Stream clone v1
    if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN' and \
        bundlespec.wireversion == '02' and \
        bundlespec.contentopts.get('streamv2')):
        return True

    return False

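# Editor's sketch: assuming the usual spec names, a parsed 'none-packed1' spec
# has wirecompression 'UN' and wireversion 's1' (stream clone v1, True above),
# a v2 spec with contentopts['streamv2'] set is also True, while something
# like 'gzip-v2' carries a non-'UN' wire compression and is rejected.
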
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

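# Editor's sketch with made-up entries: when a stream clone was requested,
# only entries whose BUNDLESPEC parses as a stream clone survive; entries with
# a non-stream spec, an unparseable spec, or no BUNDLESPEC at all are dropped,
# each with a ui.debug note explaining why:
#   kept = filterclonebundleentries(repo, entries, streamclonerequested=True)
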
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

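# Editor's sketch: with a hypothetical configuration of
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
# prefers becomes [['COMPRESSION', 'zstd'], ['VERSION', 'v2']], so entries
# advertising COMPRESSION=zstd sort ahead of others and ties are then broken
# by VERSION=v2; entries indistinguishable by both keep their manifest order.
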
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
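# Editor's sketch: a direct call would look like
#   trypullbundlefromurl(repo.ui, repo, b'https://example.com/full.hg')
# (hypothetical URL); HTTP and URL errors are warned about and turn into a
# False return so _maybeapplyclonebundle above can fall back or abort.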
@@ -1,1225 +1,1226 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

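# Editor's sketch of the return shape (hypothetical URL):
#   >>> parseurl(b'https://example.com/repo#stable', [b'default'])
#   ('https://example.com/repo', ('stable', ['default']))
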
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

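# Editor's sketch (paths hypothetical): the scheme table above drives both
# peer lookup and locality checks, roughly:
#   islocal(b'/srv/repo')             -> 'file' scheme -> local   -> True
#   islocal(b'https://example.com/r') -> httppeer      -> remote  -> False
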
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    ui = getattr(obj, "ui", ui)
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
            msg % values, label='debug.extensions')
    else:
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            log(' - running reposetup for %s\n' % (name,))
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                log(' > reposetup for %r took %s\n', name, stats)
    log('> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()

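# Editor's sketch (hypothetical paths): repository() insists on a local repo
# and returns it filtered to 'visible'; peer() accepts remote URLs as well:
#   repo = repository(ui, b'/srv/repo')
#   remote = peer(repo, {}, b'ssh://example.com/repo')
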
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo

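# Editor's sketch (hypothetical paths): create a share, then detach it later:
#   r = share(ui, b'/srv/main', dest=b'/srv/checkout', bookmarks=False)
#   full = unshare(ui, r)   # 'r' is poisoned afterwards; keep using 'full'
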
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

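# Editor's sketch: with defaultpath=b'https://example.com/repo' (hypothetical),
# the new share's .hg/hgrc is written as exactly:
#   [paths]
#   default = https://example.com/repo
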
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

480 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
480 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
481 update=True, stream=False, branch=None, shareopts=None,
481 update=True, stream=False, branch=None, shareopts=None,
482 storeincludepats=None, storeexcludepats=None):
482 storeincludepats=None, storeexcludepats=None, depth=None):
483 """Make a copy of an existing repository.
483 """Make a copy of an existing repository.
484
484
485 Create a copy of an existing repository in a new directory. The
485 Create a copy of an existing repository in a new directory. The
486 source and destination are URLs, as passed to the repository
486 source and destination are URLs, as passed to the repository
487 function. Returns a pair of repository peers, the source and
487 function. Returns a pair of repository peers, the source and
488 newly created destination.
488 newly created destination.
489
489
490 The location of the source is added to the new repository's
490 The location of the source is added to the new repository's
491 .hg/hgrc file, as the default to be used for future pulls and
491 .hg/hgrc file, as the default to be used for future pulls and
492 pushes.
492 pushes.
493
493
494 If an exception is raised, the partly cloned/updated destination
494 If an exception is raised, the partly cloned/updated destination
495 repository will be deleted.
495 repository will be deleted.
496
496
497 Arguments:
497 Arguments:
498
498
499 source: repository object or URL
499 source: repository object or URL
500
500
501 dest: URL of destination repository to create (defaults to base
501 dest: URL of destination repository to create (defaults to base
502 name of source repository)
502 name of source repository)
503
503
504 pull: always pull from source repository, even in local case or if the
504 pull: always pull from source repository, even in local case or if the
505 server prefers streaming
505 server prefers streaming
506
506
507 stream: stream raw data uncompressed from repository (fast over
507 stream: stream raw data uncompressed from repository (fast over
508 LAN, slow over WAN)
508 LAN, slow over WAN)
509
509
510 revs: revision to clone up to (implies pull=True)
510 revs: revision to clone up to (implies pull=True)
511
511
512 update: update working directory after clone completes, if
512 update: update working directory after clone completes, if
513 destination is local repository (True means update to default rev,
513 destination is local repository (True means update to default rev,
514 anything else is treated as a revision)
514 anything else is treated as a revision)
515
515
516 branch: branches to clone
516 branch: branches to clone
517
517
518 shareopts: dict of options to control auto sharing behavior. The "pool" key
518 shareopts: dict of options to control auto sharing behavior. The "pool" key
519 activates auto sharing mode and defines the directory for stores. The
519 activates auto sharing mode and defines the directory for stores. The
520 "mode" key determines how to construct the directory name of the shared
520 "mode" key determines how to construct the directory name of the shared
521 repository. "identity" means the name is derived from the node of the first
521 repository. "identity" means the name is derived from the node of the first
522 changeset in the repository. "remote" means the name is derived from the
522 changeset in the repository. "remote" means the name is derived from the
523 remote's path/URL. Defaults to "identity."
523 remote's path/URL. Defaults to "identity."
524
524
525 storeincludepats and storeexcludepats: sets of file patterns to include and
525 storeincludepats and storeexcludepats: sets of file patterns to include and
526 exclude in the repository copy, respectively. If not defined, all files
526 exclude in the repository copy, respectively. If not defined, all files
527 will be included (a "full" clone). Otherwise a "narrow" clone containing
527 will be included (a "full" clone). Otherwise a "narrow" clone containing
528 only the requested files will be performed. If ``storeincludepats`` is not
528 only the requested files will be performed. If ``storeincludepats`` is not
529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
530 ``path:.``. If both are empty sets, no files will be cloned.
530 ``path:.``. If both are empty sets, no files will be cloned.
531 """
531 """
532
532
533 if isinstance(source, bytes):
533 if isinstance(source, bytes):
534 origsource = ui.expandpath(source)
534 origsource = ui.expandpath(source)
535 source, branches = parseurl(origsource, branch)
535 source, branches = parseurl(origsource, branch)
536 srcpeer = peer(ui, peeropts, source)
536 srcpeer = peer(ui, peeropts, source)
537 else:
537 else:
538 srcpeer = source.peer() # in case we were called with a localrepo
538 srcpeer = source.peer() # in case we were called with a localrepo
539 branches = (None, branch or [])
539 branches = (None, branch or [])
540 origsource = source = srcpeer.url()
540 origsource = source = srcpeer.url()
541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
542
542
543 if dest is None:
543 if dest is None:
544 dest = defaultdest(source)
544 dest = defaultdest(source)
545 if dest:
545 if dest:
546 ui.status(_("destination directory: %s\n") % dest)
546 ui.status(_("destination directory: %s\n") % dest)
547 else:
547 else:
548 dest = ui.expandpath(dest)
548 dest = ui.expandpath(dest)
549
549
550 dest = util.urllocalpath(dest)
550 dest = util.urllocalpath(dest)
551 source = util.urllocalpath(source)
551 source = util.urllocalpath(source)
552
552
553 if not dest:
553 if not dest:
554 raise error.Abort(_("empty destination path is not valid"))
554 raise error.Abort(_("empty destination path is not valid"))
555
555
556 destvfs = vfsmod.vfs(dest, expandpath=True)
556 destvfs = vfsmod.vfs(dest, expandpath=True)
557 if destvfs.lexists():
557 if destvfs.lexists():
558 if not destvfs.isdir():
558 if not destvfs.isdir():
559 raise error.Abort(_("destination '%s' already exists") % dest)
559 raise error.Abort(_("destination '%s' already exists") % dest)
560 elif destvfs.listdir():
560 elif destvfs.listdir():
561 raise error.Abort(_("destination '%s' is not empty") % dest)
561 raise error.Abort(_("destination '%s' is not empty") % dest)
562
562
563 createopts = {}
563 createopts = {}
564 narrow = False
564 narrow = False
565
565
566 if storeincludepats is not None:
566 if storeincludepats is not None:
567 narrowspec.validatepatterns(storeincludepats)
567 narrowspec.validatepatterns(storeincludepats)
568 narrow = True
568 narrow = True
569
569
570 if storeexcludepats is not None:
570 if storeexcludepats is not None:
571 narrowspec.validatepatterns(storeexcludepats)
571 narrowspec.validatepatterns(storeexcludepats)
572 narrow = True
572 narrow = True
573
573
574 if narrow:
574 if narrow:
575 # Include everything by default if only exclusion patterns defined.
575 # Include everything by default if only exclusion patterns defined.
576 if storeexcludepats and not storeincludepats:
576 if storeexcludepats and not storeincludepats:
577 storeincludepats = {'path:.'}
577 storeincludepats = {'path:.'}
578
578
579 createopts['narrowfiles'] = True
579 createopts['narrowfiles'] = True
580
580
581 if srcpeer.capable(b'lfs-serve'):
581 if srcpeer.capable(b'lfs-serve'):
582 # Repository creation honors the config if it disabled the extension, so
582 # Repository creation honors the config if it disabled the extension, so
583 # we can't just announce that lfs will be enabled. This check avoids
583 # we can't just announce that lfs will be enabled. This check avoids
584 # saying that lfs will be enabled, and then saying it's an unknown
584 # saying that lfs will be enabled, and then saying it's an unknown
585 # feature. The lfs creation option is set in either case so that a
585 # feature. The lfs creation option is set in either case so that a
586 # requirement is added. If the extension is explicitly disabled but the
586 # requirement is added. If the extension is explicitly disabled but the
587 # requirement is set, the clone aborts early, before transferring any
587 # requirement is set, the clone aborts early, before transferring any
588 # data.
588 # data.
589 createopts['lfs'] = True
589 createopts['lfs'] = True
590
590
591 if extensions.disabledext('lfs'):
591 if extensions.disabledext('lfs'):
592 ui.status(_('(remote is using large file support (lfs), but it is '
592 ui.status(_('(remote is using large file support (lfs), but it is '
593 'explicitly disabled in the local configuration)\n'))
593 'explicitly disabled in the local configuration)\n'))
594 else:
594 else:
595 ui.status(_('(remote is using large file support (lfs); lfs will '
595 ui.status(_('(remote is using large file support (lfs); lfs will '
596 'be enabled for this repository)\n'))
596 'be enabled for this repository)\n'))
597
597
598 shareopts = shareopts or {}
598 shareopts = shareopts or {}
599 sharepool = shareopts.get('pool')
599 sharepool = shareopts.get('pool')
600 sharenamemode = shareopts.get('mode')
600 sharenamemode = shareopts.get('mode')
601 if sharepool and islocal(dest):
601 if sharepool and islocal(dest):
602 sharepath = None
602 sharepath = None
603 if sharenamemode == 'identity':
603 if sharenamemode == 'identity':
604 # Resolve the name from the initial changeset in the remote
604 # Resolve the name from the initial changeset in the remote
605 # repository. This returns nullid when the remote is empty. It
605 # repository. This returns nullid when the remote is empty. It
606 # raises RepoLookupError if revision 0 is filtered or otherwise
606 # raises RepoLookupError if revision 0 is filtered or otherwise
607 # not available. If we fail to resolve, sharing is not enabled.
607 # not available. If we fail to resolve, sharing is not enabled.
608 try:
608 try:
609 with srcpeer.commandexecutor() as e:
609 with srcpeer.commandexecutor() as e:
610 rootnode = e.callcommand('lookup', {
610 rootnode = e.callcommand('lookup', {
611 'key': '0',
611 'key': '0',
612 }).result()
612 }).result()
613
613
614 if rootnode != node.nullid:
614 if rootnode != node.nullid:
615 sharepath = os.path.join(sharepool, node.hex(rootnode))
615 sharepath = os.path.join(sharepool, node.hex(rootnode))
616 else:
616 else:
617 ui.status(_('(not using pooled storage: '
617 ui.status(_('(not using pooled storage: '
618 'remote appears to be empty)\n'))
618 'remote appears to be empty)\n'))
619 except error.RepoLookupError:
619 except error.RepoLookupError:
620 ui.status(_('(not using pooled storage: '
620 ui.status(_('(not using pooled storage: '
621 'unable to resolve identity of remote)\n'))
621 'unable to resolve identity of remote)\n'))
622 elif sharenamemode == 'remote':
622 elif sharenamemode == 'remote':
623 sharepath = os.path.join(
623 sharepath = os.path.join(
624 sharepool, node.hex(hashlib.sha1(source).digest()))
624 sharepool, node.hex(hashlib.sha1(source).digest()))
625 else:
625 else:
626 raise error.Abort(_('unknown share naming mode: %s') %
626 raise error.Abort(_('unknown share naming mode: %s') %
627 sharenamemode)
627 sharenamemode)
628
628
629 # TODO this is a somewhat arbitrary restriction.
629 # TODO this is a somewhat arbitrary restriction.
630 if narrow:
630 if narrow:
631 ui.status(_('(pooled storage not supported for narrow clones)\n'))
631 ui.status(_('(pooled storage not supported for narrow clones)\n'))
632 sharepath = None
632 sharepath = None
633
633
634 if sharepath:
634 if sharepath:
635 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
635 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
636 dest, pull=pull, rev=revs, update=update,
636 dest, pull=pull, rev=revs, update=update,
637 stream=stream)
637 stream=stream)
638
638
639 srclock = destlock = cleandir = None
639 srclock = destlock = cleandir = None
640 srcrepo = srcpeer.local()
640 srcrepo = srcpeer.local()
641 try:
641 try:
642 abspath = origsource
642 abspath = origsource
643 if islocal(origsource):
643 if islocal(origsource):
644 abspath = os.path.abspath(util.urllocalpath(origsource))
644 abspath = os.path.abspath(util.urllocalpath(origsource))
645
645
646 if islocal(dest):
646 if islocal(dest):
647 cleandir = dest
647 cleandir = dest
648
648
        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

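        # Perform the copy-based clone: replicate the store into dest/.hg,
        # carry over bookmarks and caches, then re-open the result as a
        # repository.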
        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

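            # Resolve any user-supplied revision symbols to binary nodes on
            # the source so the exchange below transfers exactly the
            # requested heads.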
            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
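            # A local destination is populated by pulling from the source; a
            # remote destination is populated by pushing from a local source.
            # Remote-to-remote clones are not supported.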
            local = destpeer.local()
            if local:
                if narrow:
                    with local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats,
                                  depth=depth)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                 )

        cleandir = None

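        # Post-clone setup for local destinations: record the clone source as
        # the 'default' path and update the working directory if requested.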
        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

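            # Resolve the update target, falling back from an explicit
            # revision to the '@' bookmark and finally to the tip of the
            # default branch (or plain tip).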
            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

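# Note: clone() is normally reached via commands.clone(), which assembles
# peeropts, shareopts and the narrow include/exclude patterns before
# delegating here.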
def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged
    into the destination.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
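    # For instance, ``hg update --check`` requests updatecheck='abort', so a
    # dirty working directory aborts the update instead of merging changes
    # across it (the flag mapping lives in the commands module).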
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

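# merge() also implements the ``--abort`` path: it reads the merge state to
# recover the pre-merge parent and forcibly updates back to it.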
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

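# outgoing() renders the nodes computed by _outgoing() above; like
# incoming(), it returns 0 when changes are found.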
def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

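# cachedlocalrepo consumes the "files of interest" above: if none of them
# changed mtime or size, a cached repository instance is assumed to still be
# valid. Long-running servers (e.g. hgweb) rely on this to avoid re-opening
# repositories on every request.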
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c