exchange: support defining narrow file patterns for pull...
Gregory Szorc
r39589:130e5df3 default
@@ -1,2623 +1,2644 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from .thirdparty import (
    attr,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    narrowspec,
    obsolete,
    phases,
    pushkey,
    pycompat,
    repository,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)
from .utils import (
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

_NARROWACL_SECTION = 'narrowhgacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Maps a bundle version to the content options used to choose which parts
# to bundle.
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

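# Illustrative note (not part of the original change): 'v2' maps to
# changegroup version '02', so a 'gzip-v2' bundlespec yields a bundle whose
# changegroup parts carry version '02':
#
#   >>> _bundlespeccgversions['v2']
#   '02'
#   >>> _bundlespeccontentopts['v2']['cg.version']
#   '02'
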
_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

@attr.s
class bundlespec(object):
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()

def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)

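# Usage sketch (illustrative, not from the original source); assumes a
# configured local ``repo``:
#
#   >>> spec = parsebundlespec(repo, 'gzip-v2;obsolescence=true')
#   >>> spec.version, spec.wireversion, spec.params
#   ('v2', '02', {'obsolescence': 'true'})
#   >>> parsebundlespec(repo, 'v1', strict=False).compression  # defaulted
#   'bzip2'
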
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

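# Typical use (sketch; 'bundle.hg' is a hypothetical path). The returned
# object is a cg1unpacker, a bundle2 unbundler, or a streamcloneapplier,
# depending on the magic header:
#
#   fh = open('bundle.hg', 'rb')
#   gen = readbundle(ui, fh, 'bundle.hg')
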
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part must be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

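# Round-trip sketch (illustrative): infer the spec of an existing bundle
# file, yielding e.g. 'bzip2-v2' or a 'none-packed1;...' string:
#
#   with open('bundle.hg', 'rb') as fh:
#       spec = getbundlespec(ui, fh)
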
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

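# Sketch (illustrative; ``commonnodes`` would come from discovery): with no
# explicit heads, everything not reachable from the common set is outgoing:
#
#   outgoing = _computeoutgoing(repo, heads=None, common=commonnodes)
#   outgoing.missing  # nodes absent from the other side
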
def _forcebundle1(op):
    """return True if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to be picked from; the highest
    # version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

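# Example hgrc snippet matching the developer config read above, forcing
# bundle1 during tests:
#
#   [devel]
#   legacy.exchange = bundle1
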
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop

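# Minimal calling sketch (illustrative): push a specific head and a bookmark
# to an already-opened peer ``other``:
#
#   pushop = push(repo, other, revs=[repo['tip'].node()], bookmarks=('@',))
#   if pushop.cgresult:
#       repo.ui.status('changesets pushed\n')
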
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

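# Sketch of how an extension could register an additional discovery step
# ('mystep' is hypothetical); it runs after the built-in steps because
# registration order is preserved:
#
#   @pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running my discovery step\n')
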
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in the issue 3781 case!
        # We drop the possible phase synchronisation done by courtesy,
        # since it could publish changesets that are draft locally
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    repo = pushop.repo
    # very naive computation that can be quite expensive on a big repo.
    # However, evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmarks to push to the remote

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push and there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

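# Sketch: an extension inserting a custom part generator at the front of the
# order ('mypart' is hypothetical); generators guard against re-running via
# pushop.stepsdone:
#
#   @b2partsgenerator('mypart', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
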
798 def _pushb2ctxcheckheads(pushop, bundler):
798 def _pushb2ctxcheckheads(pushop, bundler):
799 """Generate race condition checking parts
799 """Generate race condition checking parts
800
800
801 Exists as an independent function to aid extensions
801 Exists as an independent function to aid extensions
802 """
802 """
803 # * 'force' do not check for push race,
803 # * 'force' do not check for push race,
804 # * if we don't push anything, there are nothing to check.
804 # * if we don't push anything, there are nothing to check.
805 if not pushop.force and pushop.outgoing.missingheads:
805 if not pushop.force and pushop.outgoing.missingheads:
806 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
806 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
807 emptyremote = pushop.pushbranchmap is None
807 emptyremote = pushop.pushbranchmap is None
808 if not allowunrelated or emptyremote:
808 if not allowunrelated or emptyremote:
809 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
809 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
810 else:
810 else:
811 affected = set()
811 affected = set()
812 for branch, heads in pushop.pushbranchmap.iteritems():
812 for branch, heads in pushop.pushbranchmap.iteritems():
813 remoteheads, newheads, unsyncedheads, discardedheads = heads
813 remoteheads, newheads, unsyncedheads, discardedheads = heads
814 if remoteheads is not None:
814 if remoteheads is not None:
815 remote = set(remoteheads)
815 remote = set(remoteheads)
816 affected |= set(discardedheads) & remote
816 affected |= set(discardedheads) & remote
817 affected |= remote - set(newheads)
817 affected |= remote - set(newheads)
818 if affected:
818 if affected:
819 data = iter(sorted(affected))
819 data = iter(sorted(affected))
820 bundler.newpart('check:updated-heads', data=data)
820 bundler.newpart('check:updated-heads', data=data)
821
821
822 def _pushing(pushop):
822 def _pushing(pushop):
823 """return True if we are pushing anything"""
823 """return True if we are pushing anything"""
824 return bool(pushop.outgoing.missing
824 return bool(pushop.outgoing.missing
825 or pushop.outdatedphases
825 or pushop.outdatedphases
826 or pushop.outobsmarkers
826 or pushop.outobsmarkers
827 or pushop.outbookmarks)
827 or pushop.outbookmarks)
828
828
829 @b2partsgenerator('check-bookmarks')
829 @b2partsgenerator('check-bookmarks')
830 def _pushb2checkbookmarks(pushop, bundler):
830 def _pushb2checkbookmarks(pushop, bundler):
831 """insert bookmark move checking"""
831 """insert bookmark move checking"""
832 if not _pushing(pushop) or pushop.force:
832 if not _pushing(pushop) or pushop.force:
833 return
833 return
834 b2caps = bundle2.bundle2caps(pushop.remote)
834 b2caps = bundle2.bundle2caps(pushop.remote)
835 hasbookmarkcheck = 'bookmarks' in b2caps
835 hasbookmarkcheck = 'bookmarks' in b2caps
836 if not (pushop.outbookmarks and hasbookmarkcheck):
836 if not (pushop.outbookmarks and hasbookmarkcheck):
837 return
837 return
838 data = []
838 data = []
839 for book, old, new in pushop.outbookmarks:
839 for book, old, new in pushop.outbookmarks:
840 old = bin(old)
840 old = bin(old)
841 data.append((book, old))
841 data.append((book, old))
842 checkdata = bookmod.binaryencode(data)
842 checkdata = bookmod.binaryencode(data)
843 bundler.newpart('check:bookmarks', data=checkdata)
843 bundler.newpart('check:bookmarks', data=checkdata)
844
844
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

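# Version negotiation in miniature: pick the highest changegroup version
# both sides support, defaulting to '01' when the server advertises
# nothing. This is an illustrative sketch; 'supported_local' stands in for
# changegroup.supportedoutgoingversions(repo).
def _negotiate_cg_version_sketch(servercaps, supported_local):
    version = '01'
    cgversions = servercaps.get('changegroup')
    if cgversions:  # some old servers ship an empty value
        common = [v for v in cgversions if v in supported_local]
        if not common:
            raise ValueError('no common changegroup version')
        version = max(common)
    return version

assert _negotiate_cg_version_sketch({'changegroup': ['01', '02']},
                                    {'01', '02', '03'}) == '02'
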
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

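# _bmaction in action (the node values are hypothetical stand-ins for
# 40-char hex strings): a bookmark with no old value is newly exported,
# one with no new value is deleted, otherwise it moves.
assert _bmaction('', 'abc123') == 'export'
assert _bmaction('abc123', '') == 'delete'
assert _bmaction('abc123', 'def456') == 'update'
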
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

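# KEY=VALUE parsing sketch matching the pushvars loop above; it raises
# ValueError on a malformed entry instead of error.Abort so it stays
# dependency-free. Only the first '=' separates key from value.
def _parse_pushvars_sketch(pushvars):
    shellvars = {}
    for raw in pushvars:
        if '=' not in raw:
            raise ValueError(
                "unable to parse variable '%s', should follow "
                "'KEY=VALUE' or 'KEY=' format" % raw)
        k, v = raw.split('=', 1)
        shellvars[k] = v
    return shellvars

assert _parse_pushvars_sketch(['DEBUG=1', 'MSG=a=b']) == \
    {'DEBUG': '1', 'MSG': 'a=b'}
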
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

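# The part generator protocol in miniature: each generator may add parts
# to the bundler and may return a callable that is invoked later with the
# server's reply. The generators below are illustrative stand-ins, not
# real b2partsgenmapping entries.
def _run_partgens_sketch(partgens, pushop, bundler):
    replyhandlers = []
    for partgen in partgens:
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    return replyhandlers

def _gen_noop_sketch(pushop, bundler):
    return None  # adds nothing, expects no reply

def _gen_with_reply_sketch(pushop, bundler):
    def handlereply(op):
        return 'handled'
    return handlereply

assert len(_run_partgens_sketch([_gen_noop_sketch, _gen_with_reply_sketch],
                                None, None)) == 1
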
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We skip the courtesy phase synchronisation, which could publish
        # changesets on the remote that are possibly still draft locally.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

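# Lazy-construction sketch mirroring transactionmanager.transaction():
# the expensive resource is created on first use and exactly once. The
# factory callable below is a hypothetical stand-in for
# repo.transaction(trname).
class _LazyTrSketch(object):
    def __init__(self, make):
        self._make = make
        self._tr = None

    def transaction(self):
        if self._tr is None:  # construct on demand, only once
            self._tr = self._make()
        return self._tr

calls = []
mgr = _LazyTrSketch(lambda: calls.append('open') or 'tr')
assert mgr.transaction() == 'tr'
assert mgr.transaction() == 'tr'
assert calls == ['open']  # the factory ran exactly once
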
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()

def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsolescence
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

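# Loop-termination sketch for the partial-reply pull above: keep pulling
# until either no changesets arrive or every remote head is known locally.
# 'fetch_once' is a hypothetical callable returning the set of new nodes
# from one pull round trip; real code uses changelog length and revsets.
def _pull_until_complete_sketch(fetch_once, rheads, known):
    while True:
        new = fetch_once()
        if not new:  # nothing added: the server has no more to give
            break
        known |= new
        if all(h in known for h in rheads):
            break  # all remote heads are known locally
    return known

batches = iter([{'n1'}, {'n2'}, set()])
assert _pull_until_complete_sketch(lambda: next(batches),
                                   ['n2'], set()) == {'n1', 'n2'}
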
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop

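# Narrow-pattern selection sketch mirroring the logic in pull(): explicit
# patterns win, otherwise fall back to the repo's stored narrow patterns.
# 'repo_narrowpats' stands in for repo.narrowpats, and the pattern strings
# are hypothetical examples of Mercurial's narrowspec syntax.
def _choosenarrowpats_sketch(includepats, excludepats, repo_narrowpats):
    if includepats or excludepats:
        return includepats or set(), excludepats or set()
    return repo_narrowpats

inc, exc = _choosenarrowpats_sketch({'path:src'}, None, (set(), set()))
assert inc == {'path:src'} and exc == set()
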
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

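# Registration sketch with a private registry so the real pull steps stay
# untouched; it mirrors how pulldiscovery() records name -> function and
# preserves registration order. The step name is illustrative only.
_sketchorder = []
_sketchmapping = {}

def _sketchdiscovery(stepname):
    def dec(func):
        assert stepname not in _sketchmapping
        _sketchmapping[stepname] = func
        _sketchorder.append(stepname)
        return func
    return dec

@_sketchdiscovery('example')
def _examplestep(pullop):
    """no-op step; real steps inspect and update pullop"""

assert _sketchorder == ['example']
assert _sketchmapping['example'] is _examplestep
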
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # to new implementations.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological number of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it,
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                     source='pull')
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)
1683
1704
1684 if pullop.fetch:
1705 if pullop.fetch:
1685 pullop.cgresult = bundle2.combinechangegroupresults(op)
1706 pullop.cgresult = bundle2.combinechangegroupresults(op)
1686
1707
1687 # processing phases change
1708 # processing phases change
1688 for namespace, value in op.records['listkeys']:
1709 for namespace, value in op.records['listkeys']:
1689 if namespace == 'phases':
1710 if namespace == 'phases':
1690 _pullapplyphases(pullop, value)
1711 _pullapplyphases(pullop, value)
1691
1712
1692 # processing bookmark update
1713 # processing bookmark update
1693 if bookmarksrequested:
1714 if bookmarksrequested:
1694 books = {}
1715 books = {}
1695 for record in op.records['bookmarks']:
1716 for record in op.records['bookmarks']:
1696 books[record['bookmark']] = record["node"]
1717 books[record['bookmark']] = record["node"]
1697 pullop.remotebookmarks = books
1718 pullop.remotebookmarks = books
1698 else:
1719 else:
1699 for namespace, value in op.records['listkeys']:
1720 for namespace, value in op.records['listkeys']:
1700 if namespace == 'bookmarks':
1721 if namespace == 'bookmarks':
1701 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1722 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1702
1723
1703 # bookmark data were either already there or pulled in the bundle
1724 # bookmark data were either already there or pulled in the bundle
1704 if pullop.remotebookmarks is not None:
1725 if pullop.remotebookmarks is not None:
1705 _pullbookmarks(pullop)
1726 _pullbookmarks(pullop)
1706
1727
1707 def _pullbundle2extraprepare(pullop, kwargs):
1728 def _pullbundle2extraprepare(pullop, kwargs):
1708 """hook function so that extensions can extend the getbundle call"""
1729 """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the opening of the transaction as late as possible so we
    # don't open a transaction for nothing and don't break future useful
    # rollback calls
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
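    # For example, a configured pattern of '*' becomes 'path:.' (the whole
    # repo), while 'foo/bar' becomes 'path:foo/bar'.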

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args

def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps
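# For illustration, caps20to10() returns roughly the following (the quoted
# blob varies with the repository's bundle2 capabilities):
#
#   {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}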

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
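# Illustrative usage only (the part name below is hypothetical):
#
#     @getbundle2partsgenerator('myext:example', idx=0)
#     def _getbundlemyexamplepart(bundler, repo, source, bundlecaps=None,
#                                 b2caps=None, **kwargs):
#         bundler.newpart('myext:example', data=b'...')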

def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False
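# For example, bundle2requested({'HG20', 'bundle2=...'}) is True, while
# bundle2requested(None) and bundle2requested({'somecap'}) are False.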

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        filematcher = narrowspec.match(repo.root, include=include,
                                       exclude=exclude)
    else:
        filematcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps,
                                      filematcher=filematcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    if kwargs.get(r'narrow', False) and (include or exclude):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases', ()):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    # - narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
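    # For reference: a bundle1 client sends either the exact heads it saw,
    # the literal ['force'], or ['hashed', <sha1 of sorted heads>]; anything
    # else means the server's heads moved while the data was in transit.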

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
2440 m = []
2461 m = []
2441 for line in s.splitlines():
2462 for line in s.splitlines():
2442 fields = line.split()
2463 fields = line.split()
2443 if not fields:
2464 if not fields:
2444 continue
2465 continue
2445 attrs = {'URL': fields[0]}
2466 attrs = {'URL': fields[0]}
2446 for rawattr in fields[1:]:
2467 for rawattr in fields[1:]:
2447 key, value = rawattr.split('=', 1)
2468 key, value = rawattr.split('=', 1)
2448 key = urlreq.unquote(key)
2469 key = urlreq.unquote(key)
2449 value = urlreq.unquote(value)
2470 value = urlreq.unquote(value)
2450 attrs[key] = value
2471 attrs[key] = value
2451
2472
2452 # Parse BUNDLESPEC into components. This makes client-side
2473 # Parse BUNDLESPEC into components. This makes client-side
2453 # preferences easier to specify since you can prefer a single
2474 # preferences easier to specify since you can prefer a single
2454 # component of the BUNDLESPEC.
2475 # component of the BUNDLESPEC.
2455 if key == 'BUNDLESPEC':
2476 if key == 'BUNDLESPEC':
2456 try:
2477 try:
2457 bundlespec = parsebundlespec(repo, value)
2478 bundlespec = parsebundlespec(repo, value)
2458 attrs['COMPRESSION'] = bundlespec.compression
2479 attrs['COMPRESSION'] = bundlespec.compression
2459 attrs['VERSION'] = bundlespec.version
2480 attrs['VERSION'] = bundlespec.version
2460 except error.InvalidBundleSpecification:
2481 except error.InvalidBundleSpecification:
2461 pass
2482 pass
2462 except error.UnsupportedBundleSpecification:
2483 except error.UnsupportedBundleSpecification:
2463 pass
2484 pass
2464
2485
2465 m.append(attrs)
2486 m.append(attrs)
2466
2487
2467 return m
2488 return m
2468
2489
def isstreamclonespec(bundlespec):
    # Stream clone v1
    if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN' and \
        bundlespec.wireversion == '02' and \
        bundlespec.contentopts.get('streamv2')):
        return True

    return False

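# Hedged examples (not part of the original file) of how the predicate above
# classifies parsed specs: 'none-packed1' carries wirecompression 'UN' and
# wireversion 's1' (stream clone v1), while 'none-v2;stream=v2' yields
# wireversion '02' with the streamv2 content option set (stream clone v2);
# an ordinary spec such as 'gzip-v2' is not a stream clone.
def _demoisstreamclonespec(repo):
    assert isstreamclonespec(parsebundlespec(repo, 'none-packed1'))
    assert not isstreamclonespec(parsebundlespec(repo, 'gzip-v2'))
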
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

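# Illustrative sketch (not part of the original file), mirroring the call in
# _maybeapplyclonebundle(): when a stream clone was requested, only entries
# whose BUNDLESPEC parses to a stream clone survive; entries with a missing,
# invalid, or unsupported BUNDLESPEC, or with REQUIRESNI on a client without
# SNI, are dropped with a debug message rather than an abort.
def _demofilterclonebundles(repo, manifesttext):
    entries = parseclonebundlesmanifest(repo, manifesttext)
    return filterclonebundleentries(repo, entries, streamclonerequested=True)
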
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

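# Illustrative sketch (not part of the original file) of the preference
# machinery above; the entries and preference list are hypothetical, and
# sortclonebundleentries() derives the same (key, value) pairs from
# ui.clonebundleprefers (e.g. "COMPRESSION=zstd, VERSION=v2").
def _demosortclonebundles():
    entries = [{'URL': 'a', 'COMPRESSION': 'gzip', 'VERSION': 'v1'},
               {'URL': 'b', 'COMPRESSION': 'zstd', 'VERSION': 'v2'}]
    prefers = [('COMPRESSION', 'zstd'), ('VERSION', 'v2')]
    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value['URL'] for i in items]  # -> ['b', 'a']
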
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

    return False
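
# Taken together, the functions above implement the client side of clone
# bundles: _maybeapplyclonebundle() fetches the manifest from the remote,
# parseclonebundlesmanifest() decodes it, filterclonebundleentries() drops
# entries this client cannot apply, sortclonebundleentries() orders the rest
# by ui.clonebundleprefers, and trypullbundlefromurl() applies the winning
# URL inside a lock and transaction, warning and returning False on HTTP or
# URL errors so the caller can decide whether to fall back to a full clone.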
@@ -1,1225 +1,1227 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

from .utils import (
    stringutil,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

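# Doctest-style illustration (assumed, in the convention of defaultdest()
# below):
#
#   >>> parseurl(b'http://example.org/repo#stable', [b'default'])
#   ('http://example.org/repo', ('stable', ['default']))
#
# i.e. a '#branch' fragment is split off the URL and returned alongside any
# explicitly requested branches.
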
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    ui = getattr(obj, "ui", ui)
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
            msg % values, label='debug.extensions')
    else:
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            log('  - running reposetup for %s\n' % (name,))
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                log('  > reposetup for %r took %s\n', name, stats)
    log('> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.makedirs()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=stringutil.forcebytestr(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

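# Minimal usage sketch (not part of the original file; paths hypothetical):
#
#   from mercurial import hg, ui as uimod
#   r = hg.share(uimod.ui.load(), source=b'/srv/repos/main',
#                dest=b'/home/alice/main-share', update=True)
#
# The new repository gains a 'shared' (or 'relshared', when relative=True)
# entry in .hg/requires and a .hg/sharedpath file pointing at the source
# store, as written by the function above.
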
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

    with destrepo.wlock():
        if bookmarks:
            destrepo.vfs.write('shared', sharedbookmarks + '\n')

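# Sketch of the extension hook point described in the docstring above; the
# wrapper and the extra 'myfeature' entry are hypothetical:
#
#   from mercurial import extensions, hg
#
#   def extsetup(ui):
#       def wrappedpostshare(orig, sourcerepo, destrepo, **kwargs):
#           orig(sourcerepo, destrepo, **kwargs)
#           with destrepo.wlock():
#               destrepo.vfs.append('shared', b'myfeature\n')
#       extensions.wrapfunction(hg, 'postshare', wrappedpostshare)
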
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

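# Hedged illustration of the pooled layout managed above: with the share
# extension configured as, say,
#
#   [share]
#   pool = /srv/hgpool
#   poolnaming = identity
#
# each distinct repository is cloned once into /srv/hgpool/<hex of rev 0>
# and every clone destination becomes a share of that store; 'remote' naming
# instead hashes the source URL with sha1, as resolved in clone() below.
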
492 # Recomputing branch cache might be slow on big repos,
492 # Recomputing branch cache might be slow on big repos,
493 # so just copy it
493 # so just copy it
494 def _copycache(srcrepo, dstcachedir, fname):
494 def _copycache(srcrepo, dstcachedir, fname):
495 """copy a cache from srcrepo to destcachedir (if it exists)"""
495 """copy a cache from srcrepo to destcachedir (if it exists)"""
496 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
496 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
497 dstbranchcache = os.path.join(dstcachedir, fname)
497 dstbranchcache = os.path.join(dstcachedir, fname)
498 if os.path.exists(srcbranchcache):
498 if os.path.exists(srcbranchcache):
499 if not os.path.exists(dstcachedir):
499 if not os.path.exists(dstcachedir):
500 os.mkdir(dstcachedir)
500 os.mkdir(dstcachedir)
501 util.copyfile(srcbranchcache, dstbranchcache)
501 util.copyfile(srcbranchcache, dstbranchcache)
502
502
503 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
503 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
504 update=True, stream=False, branch=None, shareopts=None,
504 update=True, stream=False, branch=None, shareopts=None,
505 storeincludepats=None, storeexcludepats=None):
505 storeincludepats=None, storeexcludepats=None):
506 """Make a copy of an existing repository.
506 """Make a copy of an existing repository.
507
507
508 Create a copy of an existing repository in a new directory. The
508 Create a copy of an existing repository in a new directory. The
509 source and destination are URLs, as passed to the repository
509 source and destination are URLs, as passed to the repository
510 function. Returns a pair of repository peers, the source and
510 function. Returns a pair of repository peers, the source and
511 newly created destination.
511 newly created destination.
512
512
513 The location of the source is added to the new repository's
513 The location of the source is added to the new repository's
514 .hg/hgrc file, as the default to be used for future pulls and
514 .hg/hgrc file, as the default to be used for future pulls and
515 pushes.
515 pushes.
516
516
517 If an exception is raised, the partly cloned/updated destination
517 If an exception is raised, the partly cloned/updated destination
518 repository will be deleted.
518 repository will be deleted.
519
519
520 Arguments:
520 Arguments:
521
521
522 source: repository object or URL
522 source: repository object or URL
523
523
524 dest: URL of destination repository to create (defaults to base
524 dest: URL of destination repository to create (defaults to base
525 name of source repository)
525 name of source repository)
526
526
527 pull: always pull from source repository, even in local case or if the
527 pull: always pull from source repository, even in local case or if the
528 server prefers streaming
528 server prefers streaming
529
529
530 stream: stream raw data uncompressed from repository (fast over
530 stream: stream raw data uncompressed from repository (fast over
531 LAN, slow over WAN)
531 LAN, slow over WAN)
532
532
533 revs: revision to clone up to (implies pull=True)
533 revs: revision to clone up to (implies pull=True)
534
534
535 update: update working directory after clone completes, if
535 update: update working directory after clone completes, if
536 destination is local repository (True means update to default rev,
536 destination is local repository (True means update to default rev,
537 anything else is treated as a revision)
537 anything else is treated as a revision)
538
538
539 branch: branches to clone
539 branch: branches to clone
540
540
541 shareopts: dict of options to control auto sharing behavior. The "pool" key
541 shareopts: dict of options to control auto sharing behavior. The "pool" key
542 activates auto sharing mode and defines the directory for stores. The
542 activates auto sharing mode and defines the directory for stores. The
543 "mode" key determines how to construct the directory name of the shared
543 "mode" key determines how to construct the directory name of the shared
544 repository. "identity" means the name is derived from the node of the first
544 repository. "identity" means the name is derived from the node of the first
545 changeset in the repository. "remote" means the name is derived from the
545 changeset in the repository. "remote" means the name is derived from the
546 remote's path/URL. Defaults to "identity."
546 remote's path/URL. Defaults to "identity."
547
547
548 storeincludepats and storeexcludepats: sets of file patterns to include and
548 storeincludepats and storeexcludepats: sets of file patterns to include and
549 exclude in the repository copy, respectively. If not defined, all files
549 exclude in the repository copy, respectively. If not defined, all files
550 will be included (a "full" clone). Otherwise a "narrow" clone containing
550 will be included (a "full" clone). Otherwise a "narrow" clone containing
551 only the requested files will be performed. If ``storeincludepats`` is not
551 only the requested files will be performed. If ``storeincludepats`` is not
552 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
552 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
553 ``path:.``. If both are empty sets, no files will be cloned.
553 ``path:.``. If both are empty sets, no files will be cloned.
554 """
554 """
555
555
556 if isinstance(source, bytes):
556 if isinstance(source, bytes):
557 origsource = ui.expandpath(source)
557 origsource = ui.expandpath(source)
558 source, branches = parseurl(origsource, branch)
558 source, branches = parseurl(origsource, branch)
559 srcpeer = peer(ui, peeropts, source)
559 srcpeer = peer(ui, peeropts, source)
560 else:
560 else:
561 srcpeer = source.peer() # in case we were called with a localrepo
561 srcpeer = source.peer() # in case we were called with a localrepo
562 branches = (None, branch or [])
562 branches = (None, branch or [])
563 origsource = source = srcpeer.url()
563 origsource = source = srcpeer.url()
564 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
564 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
565
565
566 if dest is None:
566 if dest is None:
567 dest = defaultdest(source)
567 dest = defaultdest(source)
568 if dest:
568 if dest:
569 ui.status(_("destination directory: %s\n") % dest)
569 ui.status(_("destination directory: %s\n") % dest)
570 else:
570 else:
571 dest = ui.expandpath(dest)
571 dest = ui.expandpath(dest)
572
572
573 dest = util.urllocalpath(dest)
573 dest = util.urllocalpath(dest)
574 source = util.urllocalpath(source)
574 source = util.urllocalpath(source)
575
575
576 if not dest:
576 if not dest:
577 raise error.Abort(_("empty destination path is not valid"))
577 raise error.Abort(_("empty destination path is not valid"))
578
578
579 destvfs = vfsmod.vfs(dest, expandpath=True)
579 destvfs = vfsmod.vfs(dest, expandpath=True)
580 if destvfs.lexists():
580 if destvfs.lexists():
581 if not destvfs.isdir():
581 if not destvfs.isdir():
582 raise error.Abort(_("destination '%s' already exists") % dest)
582 raise error.Abort(_("destination '%s' already exists") % dest)
583 elif destvfs.listdir():
583 elif destvfs.listdir():
584 raise error.Abort(_("destination '%s' is not empty") % dest)
584 raise error.Abort(_("destination '%s' is not empty") % dest)
585
585
586 createopts = {}
586 createopts = {}
587 narrow = False
587 narrow = False
588
588
589 if storeincludepats is not None:
589 if storeincludepats is not None:
590 narrowspec.validatepatterns(storeincludepats)
590 narrowspec.validatepatterns(storeincludepats)
591 narrow = True
591 narrow = True
592
592
593 if storeexcludepats is not None:
593 if storeexcludepats is not None:
594 narrowspec.validatepatterns(storeexcludepats)
594 narrowspec.validatepatterns(storeexcludepats)
595 narrow = True
595 narrow = True
596
596
597 if narrow:
597 if narrow:
598 # Include everything by default if only exclusion patterns defined.
598 # Include everything by default if only exclusion patterns defined.
599 if storeexcludepats and not storeincludepats:
599 if storeexcludepats and not storeincludepats:
600 storeincludepats = {'path:.'}
600 storeincludepats = {'path:.'}
601
601
602 createopts['narrowfiles'] = True
602 createopts['narrowfiles'] = True
603
603
604 shareopts = shareopts or {}
604 shareopts = shareopts or {}
605 sharepool = shareopts.get('pool')
605 sharepool = shareopts.get('pool')
606 sharenamemode = shareopts.get('mode')
606 sharenamemode = shareopts.get('mode')
607 if sharepool and islocal(dest):
607 if sharepool and islocal(dest):
608 sharepath = None
608 sharepath = None
609 if sharenamemode == 'identity':
609 if sharenamemode == 'identity':
610 # Resolve the name from the initial changeset in the remote
610 # Resolve the name from the initial changeset in the remote
611 # repository. This returns nullid when the remote is empty. It
611 # repository. This returns nullid when the remote is empty. It
612 # raises RepoLookupError if revision 0 is filtered or otherwise
612 # raises RepoLookupError if revision 0 is filtered or otherwise
613 # not available. If we fail to resolve, sharing is not enabled.
613 # not available. If we fail to resolve, sharing is not enabled.
614 try:
614 try:
615 with srcpeer.commandexecutor() as e:
615 with srcpeer.commandexecutor() as e:
616 rootnode = e.callcommand('lookup', {
616 rootnode = e.callcommand('lookup', {
617 'key': '0',
617 'key': '0',
618 }).result()
618 }).result()
619
619
620 if rootnode != node.nullid:
620 if rootnode != node.nullid:
621 sharepath = os.path.join(sharepool, node.hex(rootnode))
621 sharepath = os.path.join(sharepool, node.hex(rootnode))
622 else:
622 else:
623 ui.status(_('(not using pooled storage: '
623 ui.status(_('(not using pooled storage: '
624 'remote appears to be empty)\n'))
624 'remote appears to be empty)\n'))
625 except error.RepoLookupError:
625 except error.RepoLookupError:
626 ui.status(_('(not using pooled storage: '
626 ui.status(_('(not using pooled storage: '
627 'unable to resolve identity of remote)\n'))
627 'unable to resolve identity of remote)\n'))
628 elif sharenamemode == 'remote':
628 elif sharenamemode == 'remote':
629 sharepath = os.path.join(
629 sharepath = os.path.join(
630 sharepool, node.hex(hashlib.sha1(source).digest()))
630 sharepool, node.hex(hashlib.sha1(source).digest()))
631 else:
631 else:
632 raise error.Abort(_('unknown share naming mode: %s') %
632 raise error.Abort(_('unknown share naming mode: %s') %
633 sharenamemode)
633 sharenamemode)
634
634
635 # TODO this is a somewhat arbitrary restriction.
635 # TODO this is a somewhat arbitrary restriction.
636 if narrow:
636 if narrow:
637 ui.status(_('(pooled storage not supported for narrow clones)\n'))
637 ui.status(_('(pooled storage not supported for narrow clones)\n'))
638 sharepath = None
638 sharepath = None
639
639
640 if sharepath:
640 if sharepath:
641 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
641 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
642 dest, pull=pull, rev=revs, update=update,
642 dest, pull=pull, rev=revs, update=update,
643 stream=stream)
643 stream=stream)
644
644
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
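
# Illustrative sketch (an assumption about the caller, not shown in this
# hunk): the includepats/excludepats forwarded to exchange.pull() above
# originate as narrow pattern sets handed to clone(), e.g. something
# roughly like
#
#   hg.clone(ui, {}, source, dest,
#            storeincludepats={'path:src/lib'},
#            storeexcludepats={'path:src/lib/tests'})
#
# where 'path:'-style matcher patterns select which files the narrow
# clone will actually store.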

def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0
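
# Rough usage sketch for the update family above: update() merges local
# changes toward the target when possible, clean() discards them, and
# both return True when unresolved conflicts remain:
#
#   if hg.update(repo, node):
#       # conflicts were left behind; the user must run 'hg resolve'
#       ...
#   hg.clean(repo, node)  # forcibly discard local changes instead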

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
              changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
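
# The updatecheck values documented above are also what the
# commands.update.check config knob accepts; for example an hgrc of
#
#   [commands]
#   update.check = noconflict
#
# makes a bare update refuse to proceed if it would require file merges,
# while an unset or unrecognized value falls back to 'linear' here.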

def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
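
# Sketch of the two modes of merge() above: a regular branch merge versus
# an abort that rewinds to the local side recorded in the mergestate (in
# the abort case the node argument is recomputed and effectively unused):
#
#   hg.merge(repo, repo['other'].node())   # start a merge
#   hg.merge(repo, None, abort=True)       # abandon it again
#
# (revision names are illustrative; real callers live in commands.py.)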

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
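
# Minimal sketch of a displaychlist callback as _incoming() expects it;
# incoming() below passes a fuller variant with limit and no_merges
# handling:
#
#   def displaychlist(other, chlist, displayer):
#       for n in chlist:
#           displayer.show(other[n])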

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
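
# Note on the return values above: incoming() and outgoing() follow the
# usual command convention of exit code 0 when changes were found and 1
# when not; the subrepo recursion helpers start from ret = 1 and can only
# lower it when a subrepo reports changes of its own.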

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst
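
# Hedged example of remoteui() in effect: given a repo's ui carrying both
# global and repo-local configuration, the returned ui keeps only the
# whitelisted remote-relevant bits:
#
#   remote = hg.remoteui(repo, {'ssh': 'ssh -C'})
#   remote.config('ui', 'ssh')   # -> 'ssh -C'; repo-specific config such
#                                # as local hooks is not carried over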

# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
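
# Usage sketch for cachedlocalrepo (hgweb employs it this way): hold a
# repo instance across requests and transparently reload it once any of
# the files-of-interest above changes on disk:
#
#   cached = cachedlocalrepo(repo)
#   ...
#   repo, refreshed = cached.fetch()
#   if refreshed:
#       repo.ui.debug('repository changed on disk; reloaded\n')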