##// END OF EJS Templates
configitems: register 'ui.clonebundleprefers' as example for 'configlist'...
marmoute -
r32989:03608e8d default
parent child Browse files
Show More
@@ -1,44 +1,47 @@
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 )
12 )
13
13
class configitem(object):
    """A single known configuration option.

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    """

    def __init__(self, section, name, default=None):
        # Record the (section, name) coordinates of the option and the
        # fallback value used when it is absent from the user's config.
        self.default = default
        self.name = name
        self.section = section
26
26
# Central registry of core config items: {section: {name: configitem}}.
coreitems = {}

def coreconfigitem(*args, **kwargs):
    """Declare a core config item and record it in ``coreitems``.

    Raises error.ProgrammingError when the same ``section.name`` pair is
    registered twice, so a later declaration cannot silently shadow an
    earlier one.
    """
    newitem = configitem(*args, **kwargs)
    if newitem.section not in coreitems:
        coreitems[newitem.section] = {}
    registered = coreitems[newitem.section]
    if newitem.name in registered:
        raise error.ProgrammingError(
            "duplicated config item registration for '%s.%s'"
            % (newitem.section, newitem.name))
    registered[newitem.name] = newitem
36
36
# Registering actual config items

coreconfigitem('patch', 'fuzz', default=2)
# NOTE(review): the empty-list default is a single shared object; this is
# safe only as long as consumers treat the registered default as read-only
# -- confirm ui.configlist does not hand this list out for mutation.
coreconfigitem('ui', 'clonebundleprefers', default=[])
coreconfigitem('ui', 'quiet', default=False)
@@ -1,2011 +1,2011 @@
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 bookmarks as bookmod,
19 bookmarks as bookmod,
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 lock as lockmod,
24 lock as lockmod,
25 obsolete,
25 obsolete,
26 phases,
26 phases,
27 pushkey,
27 pushkey,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sslutil,
30 sslutil,
31 streamclone,
31 streamclone,
32 url as urlmod,
32 url as urlmod,
33 util,
33 util,
34 )
34 )
35
35
36 urlerr = util.urlerr
36 urlerr = util.urlerr
37 urlreq = util.urlreq
37 urlreq = util.urlreq
38
38
39 # Maps bundle version human names to changegroup versions.
39 # Maps bundle version human names to changegroup versions.
40 _bundlespeccgversions = {'v1': '01',
40 _bundlespeccgversions = {'v1': '01',
41 'v2': '02',
41 'v2': '02',
42 'packed1': 's1',
42 'packed1': 's1',
43 'bundle2': '02', #legacy
43 'bundle2': '02', #legacy
44 }
44 }
45
45
46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
48
48
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into (version, {k: v}); values are
        # URI-decoded. A string without ";" has no parameters.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-specified form: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Translate human names to the internal wire identifiers.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
170
170
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    ``fname`` is used for error reporting (``"stream"`` when empty); when
    ``vfs`` is given the name is joined to the vfs root for messages.
    Dispatches on the 4-byte magic header to a cg1 unpacker, a bundle2
    unbundler, or a stream-clone applier; raises ``error.Abort`` for
    anything unrecognized.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # Headerless changegroup: push the read bytes back and assume an
        # uncompressed HG10 stream.
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # The compression algorithm follows the HG10 header on the wire.
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
198
198
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map an internal compression type to its human bundlespec name,
        # or None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # Internal marker for a BZ stream whose header was consumed.
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
251
251
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        common = [nullid]
    else:
        # Drop common nodes the local changelog does not actually know.
        common = [node for node in common if changelog.hasnode(node)]
    if not heads:
        heads = changelog.heads()
    return discovery.outgoing(repo, common, heads)
270
270
271 def _forcebundle1(op):
271 def _forcebundle1(op):
272 """return true if a pull/push must use bundle1
272 """return true if a pull/push must use bundle1
273
273
274 This function is used to allow testing of the older bundle version"""
274 This function is used to allow testing of the older bundle version"""
275 ui = op.repo.ui
275 ui = op.repo.ui
276 forcebundle1 = False
276 forcebundle1 = False
277 # The goal is this config is to allow developer to choose the bundle
277 # The goal is this config is to allow developer to choose the bundle
278 # version used during exchanged. This is especially handy during test.
278 # version used during exchanged. This is especially handy during test.
279 # Value is a list of bundle version to be picked from, highest version
279 # Value is a list of bundle version to be picked from, highest version
280 # should be used.
280 # should be used.
281 #
281 #
282 # developer config: devel.legacy.exchange
282 # developer config: devel.legacy.exchange
283 exchange = ui.configlist('devel', 'legacy.exchange')
283 exchange = ui.configlist('devel', 'legacy.exchange')
284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
285 return forcebundle1 or not op.remote.capable('bundle2')
285 return forcebundle1 or not op.remote.capable('bundle2')
286
286
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
411
411
412
412
413 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
413 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
414 opargs=None):
414 opargs=None):
415 '''Push outgoing changesets (limited by revs) from a local
415 '''Push outgoing changesets (limited by revs) from a local
416 repository to remote. Return an integer:
416 repository to remote. Return an integer:
417 - None means nothing to push
417 - None means nothing to push
418 - 0 means HTTP error
418 - 0 means HTTP error
419 - 1 means we pushed and remote head count is unchanged *or*
419 - 1 means we pushed and remote head count is unchanged *or*
420 we have outgoing changesets but refused to push
420 we have outgoing changesets but refused to push
421 - other values as described by addchangegroup()
421 - other values as described by addchangegroup()
422 '''
422 '''
423 if opargs is None:
423 if opargs is None:
424 opargs = {}
424 opargs = {}
425 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
425 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
426 **opargs)
426 **opargs)
427 if pushop.remote.local():
427 if pushop.remote.local():
428 missing = (set(pushop.repo.requirements)
428 missing = (set(pushop.repo.requirements)
429 - pushop.remote.local().supported)
429 - pushop.remote.local().supported)
430 if missing:
430 if missing:
431 msg = _("required features are not"
431 msg = _("required features are not"
432 " supported in the destination:"
432 " supported in the destination:"
433 " %s") % (', '.join(sorted(missing)))
433 " %s") % (', '.join(sorted(missing)))
434 raise error.Abort(msg)
434 raise error.Abort(msg)
435
435
436 # there are two ways to push to remote repo:
436 # there are two ways to push to remote repo:
437 #
437 #
438 # addchangegroup assumes local user can lock remote
438 # addchangegroup assumes local user can lock remote
439 # repo (local filesystem, old ssh servers).
439 # repo (local filesystem, old ssh servers).
440 #
440 #
441 # unbundle assumes local user cannot lock remote repo (new ssh
441 # unbundle assumes local user cannot lock remote repo (new ssh
442 # servers, http servers).
442 # servers, http servers).
443
443
444 if not pushop.remote.canpush():
444 if not pushop.remote.canpush():
445 raise error.Abort(_("destination does not support push"))
445 raise error.Abort(_("destination does not support push"))
446 # get local lock as we might write phase data
446 # get local lock as we might write phase data
447 localwlock = locallock = None
447 localwlock = locallock = None
448 try:
448 try:
449 # bundle2 push may receive a reply bundle touching bookmarks or other
449 # bundle2 push may receive a reply bundle touching bookmarks or other
450 # things requiring the wlock. Take it now to ensure proper ordering.
450 # things requiring the wlock. Take it now to ensure proper ordering.
451 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
451 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
452 if (not _forcebundle1(pushop)) and maypushback:
452 if (not _forcebundle1(pushop)) and maypushback:
453 localwlock = pushop.repo.wlock()
453 localwlock = pushop.repo.wlock()
454 locallock = pushop.repo.lock()
454 locallock = pushop.repo.lock()
455 pushop.locallocked = True
455 pushop.locallocked = True
456 except IOError as err:
456 except IOError as err:
457 pushop.locallocked = False
457 pushop.locallocked = False
458 if err.errno != errno.EACCES:
458 if err.errno != errno.EACCES:
459 raise
459 raise
460 # source repo cannot be locked.
460 # source repo cannot be locked.
461 # We do not abort the push, but just disable the local phase
461 # We do not abort the push, but just disable the local phase
462 # synchronisation.
462 # synchronisation.
463 msg = 'cannot lock source repository: %s\n' % err
463 msg = 'cannot lock source repository: %s\n' % err
464 pushop.ui.debug(msg)
464 pushop.ui.debug(msg)
465 try:
465 try:
466 if pushop.locallocked:
466 if pushop.locallocked:
467 pushop.trmanager = transactionmanager(pushop.repo,
467 pushop.trmanager = transactionmanager(pushop.repo,
468 'push-response',
468 'push-response',
469 pushop.remote.url())
469 pushop.remote.url())
470 pushop.repo.checkpush(pushop)
470 pushop.repo.checkpush(pushop)
471 lock = None
471 lock = None
472 unbundle = pushop.remote.capable('unbundle')
472 unbundle = pushop.remote.capable('unbundle')
473 if not unbundle:
473 if not unbundle:
474 lock = pushop.remote.lock()
474 lock = pushop.remote.lock()
475 try:
475 try:
476 _pushdiscovery(pushop)
476 _pushdiscovery(pushop)
477 if not _forcebundle1(pushop):
477 if not _forcebundle1(pushop):
478 _pushbundle2(pushop)
478 _pushbundle2(pushop)
479 _pushchangeset(pushop)
479 _pushchangeset(pushop)
480 _pushsyncphase(pushop)
480 _pushsyncphase(pushop)
481 _pushobsolete(pushop)
481 _pushobsolete(pushop)
482 _pushbookmark(pushop)
482 _pushbookmark(pushop)
483 finally:
483 finally:
484 if lock is not None:
484 if lock is not None:
485 lock.release()
485 lock.release()
486 if pushop.trmanager:
486 if pushop.trmanager:
487 pushop.trmanager.close()
487 pushop.trmanager.close()
488 finally:
488 finally:
489 if pushop.trmanager:
489 if pushop.trmanager:
490 pushop.trmanager.release()
490 pushop.trmanager.release()
491 if locallock is not None:
491 if locallock is not None:
492 locallock.release()
492 locallock.release()
493 if localwlock is not None:
493 if localwlock is not None:
494 localwlock.release()
494 localwlock.release()
495
495
496 return pushop
496 return pushop
497
497
# Ordered list of discovery step names; _pushdiscovery() runs them in this
# order before a push, so registration order is significant.
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
505
505
def pushdiscovery(stepname):
    """Register the decorated function as the discovery step ``stepname``.

    The function is recorded in the step -> function mapping and its name
    appended to the ordered step list. Decorated functions are registered
    in definition order, which may matter.

    Only use this decorator for a brand new step; to wrap a step from an
    extension, modify the pushdiscovery dictionary directly.
    """
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func
    return register
521
521
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
527
527
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Discover the changesets that need to be pushed.

    Stores the outgoing set, the remote heads and the incoming flag on the
    push operation for later steps.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
540
540
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    Computes two lists of heads to publish on the remote and stores them on
    the push operation:

    - ``pushop.outdatedphases``: assuming the changeset push succeeds,
    - ``pushop.fallbackoutdatedphases``: assuming it fails (only the
      already-common heads are considered).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    # split the remote phase information into public heads and draft roots
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a non-publishing server only changesets already public locally may
    # be published remotely
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
589
589
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the outgoing push."""
    repo = pushop.repo
    # nothing to do unless marker exchange is enabled, we have markers, and
    # the remote advertises obsolescence support
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
600
600
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmark updates to send to the remote

    Populates ``pushop.outbookmarks`` with ``(name, old, new)`` hex triples
    (empty string meaning "absent") and sets ``pushop.bkresult = 2`` when an
    explicitly requested bookmark exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # when specific revisions are pushed, only bookmarks on their ancestors
    # may be advanced
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks explicitly requested on the command line (names expanded)
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # None marks a missing bookmark; keep it as-is
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        # convert binary nodes to hex in each (name, src, dst) triple
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # bookmark that advanced locally: push the new position (only if within
    # the pushed revisions, when revisions were restricted)
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    # anything left in 'explicit' was requested but matched nothing
    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
665
665
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing them.

    Returns False when there is nothing to push. Without --force, aborts if
    any outgoing head is obsolete or troubled, and runs the standard remote
    head check.
    """
    unfi = pushop.repo.unfiltered()
    outgoing = pushop.outgoing
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force and unfi.obsstore:
        # an empty obsstore means no obsolete changesets, so the scan below
        # can be skipped entirely
        # this message are here for 80 char limit reason
        mso = _("push includes obsolete changeset: %s!")
        mst = {"unstable": _("push includes unstable changeset: %s!"),
               "bumped": _("push includes bumped changeset: %s!"),
               "divergent": _("push includes divergent changeset: %s!")}
        # If at least one obsolete or unstable changeset is in missing,
        # then at least one of the missing heads is obsolete or unstable,
        # so inspecting the heads alone is sufficient.
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(mso % ctx)
            if ctx.troubled():
                raise error.Abort(mst[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
696
696
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
704
704
def b2partsgenerator(stepname, idx=None):
    """Register the decorated function as the bundle2 part generator ``stepname``.

    The function is recorded in the step -> function mapping. When ``idx``
    is None the step name is appended to the ordered step list, otherwise
    it is inserted at that position. Decorated functions are registered in
    definition order, which may matter.

    Only use this decorator for a brand new step; to wrap a step from an
    extension, modify the b2partsgenmapping dictionary directly.
    """
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
723
723
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        if not allowunrelated:
            # legacy check: the server aborts if its heads differ at all
            # from the ones we observed
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only guard the remote heads this push actually
            # touches (replaces or leaves behind)
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    # remote heads we intend to discard
                    affected |= set(discardedheads) & remote
                    # remote heads that will no longer be heads afterwards
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
746
746
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    Returns a reply handler extracting that result from the server reply,
    or None when there is nothing to push.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version: highest version supported by both
    # sides, defaulting to '01' when the remote advertises none
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
787
787
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one 'pushkey' part per head that must be turned public on the
    remote, and returns a reply handler that warns about any head the
    server ignored or refused to publish.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use the idiomatic membership test, matching _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, so replies/failures can be mapped back to the
    # head they concern
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
828
828
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part to the outgoing bundle2 bundle."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # skip entirely when no marker format is shared with the remote
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
840
840
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one 'pushkey' part per outgoing bookmark change and returns a
    reply handler that reports per-bookmark success or failure and sets
    ``pushop.bkresult``.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples, so replies/failures can be
    # mapped back to the bookmark they concern
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty 'old' means creation, empty 'new' means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
892
892
893
893
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    :pushop: the push operation object carrying all push state.

    Builds a bundle2 from the registered part generators, sends it with
    ``unbundle``, processes the server reply and finally runs every reply
    handler returned by the part generators.
    """
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # "pushback" (the server shipping data back in its reply for us to apply
    # locally) is only allowed when we have a transaction manager to write
    # into and the experimental config knob is enabled
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # each part generator may add parts to the bundle; generators that need
    # to inspect the server reply return a callable, collected here and run
    # after the reply is processed
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the bundle always contains the 'replycaps' part, hence "<= 1")
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            # the remote rejected a mandatory part/parameter it doesn't know
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server aborted while processing one of our parts; relay
            # its message (and optional hint) before aborting locally
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # dispatch the failure to the callback registered for that part,
        # if any; otherwise let the exception propagate
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
942
942
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) changegroup push. Skipped entirely when the
    'changesets' step was already handled (e.g. through bundle2).
    Sets ``pushop.cgresult`` to the remote's return value.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        # partial push, or some revisions are excluded/filtered: take the
        # general (slower) changegroup construction path
        cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
                                        bundlecaps=bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
989
989
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Phase data is exchanged even when no changesets were pushed. The remote
    phase state is applied locally first, then any locally-public heads the
    remote still considers draft are pushed via the ``phases`` pushkey
    namespace (unless bundle2 already handled the 'phases' step).
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # remote publishes: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1045
1045
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    When no transaction manager is available (repo not locked), no phase is
    changed; the user is informed instead when a move would have happened.
    """
    if pushop.trmanager:
        # we hold a transaction: safe to advance the phase boundary
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    repo = pushop.repo
    wouldmove = [node for node in nodes if phase < repo[node].phase()]
    phasestr = phases.phasenames[phase]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1062
1062
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remote = pushop.remote
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        pushop.repo.ui.warn(_('failed to push some obsolete markers!\n'))
1081
1081
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # classify the change so the matching message pair is used
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        accepted = remote.pushkey('bookmarks', book, old, new)
        if accepted:
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
        # discovery can have set the value form invalid entry
        if pushop.bkresult is not None:
            pushop.bkresult = 1
1103
1103
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly (names expanded through the local
        # bookmark store)
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested (None: honor server config)
        self.streamclonerequested = streamclonerequested
        # transaction manager (set by pull() once locks are held)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless bundle1 is explicitly forced for this exchange
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1174
1174
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction; stays None until transaction() is used
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            # expose origin information to hooks
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1204
1204
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Raises ``error.Abort`` when the remote requires repository features the
    local repository does not support (local peers only).

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # a local peer exposes its requirements; refuse early rather than
        # fail midway through the pull
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        # working-copy lock is taken before the store lock (standard
        # Mercurial lock ordering)
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step below is a no-op when bundle2 already marked it done
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release in reverse acquisition order; aborts the transaction if
        # it was never closed
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1260
1260
# list of steps to perform discovery before pull, in execution order
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1268
1268
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is recorded in the step -> function mapping and
    its name appended to the list of steps. Beware that decorated functions
    are added in decoration order (this may matter).

    Use this decorator only to introduce a new step; to wrap an existing
    step from an extension, modify the pulldiscovery dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1284
1284
def _pulldiscovery(pullop):
    """Run all discovery steps, in registration order"""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1290
1290
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    # Skip when bookmarks are already known, or when a bundle2 server
    # supporting listkeys will carry them inside the bundle. (All known
    # bundle2 servers now support listkeys, but lets be nice with new
    # implementation.)
    mustfetch = (pullop.remotebookmarks is None
                 and not (pullop.canusebundle2
                          and 'listkeys' in pullop.remotebundle2caps))
    if mustfetch:
        pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1304
1304
1305
1305
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point.

    Fills in ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    # nodemap of the *unfiltered* local repo, so filtered/hidden local
    # changesets are still recognized as locally known
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # locally known (possibly hidden): treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head turned out to be locally known
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1343
1343
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the getbundle keyword arguments from the pull operation state,
    fetches and processes the resulting bundle2 stream, then applies the
    phase and bookmark listkeys replies carried in the bundle.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup; mark the step done up front so the legacy
    # _pullchangeset path will not run again after us
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    # request obsolescence markers only when both sides share a marker version
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions add/adjust getbundle arguments before the wire call
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        # the server asked us to abort; surface its message with the hint
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        # fold the per-part changegroup return codes into a single result
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1416
1416
1417 def _pullbundle2extraprepare(pullop, kwargs):
1417 def _pullbundle2extraprepare(pullop, kwargs):
1418 """hook function so that extensions can extend the getbundle call"""
1418 """hook function so that extensions can extend the getbundle call"""
1419 pass
1419 pass
1420
1420
1421 def _pullchangeset(pullop):
1421 def _pullchangeset(pullop):
1422 """pull changeset from unbundle into the local repo"""
1422 """pull changeset from unbundle into the local repo"""
1423 # We delay the open of the transaction as late as possible so we
1423 # We delay the open of the transaction as late as possible so we
1424 # don't open transaction for nothing or you break future useful
1424 # don't open transaction for nothing or you break future useful
1425 # rollback call
1425 # rollback call
1426 if 'changegroup' in pullop.stepsdone:
1426 if 'changegroup' in pullop.stepsdone:
1427 return
1427 return
1428 pullop.stepsdone.add('changegroup')
1428 pullop.stepsdone.add('changegroup')
1429 if not pullop.fetch:
1429 if not pullop.fetch:
1430 pullop.repo.ui.status(_("no changes found\n"))
1430 pullop.repo.ui.status(_("no changes found\n"))
1431 pullop.cgresult = 0
1431 pullop.cgresult = 0
1432 return
1432 return
1433 tr = pullop.gettransaction()
1433 tr = pullop.gettransaction()
1434 if pullop.heads is None and list(pullop.common) == [nullid]:
1434 if pullop.heads is None and list(pullop.common) == [nullid]:
1435 pullop.repo.ui.status(_("requesting all changes\n"))
1435 pullop.repo.ui.status(_("requesting all changes\n"))
1436 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1436 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1437 # issue1320, avoid a race if remote changed after discovery
1437 # issue1320, avoid a race if remote changed after discovery
1438 pullop.heads = pullop.rheads
1438 pullop.heads = pullop.rheads
1439
1439
1440 if pullop.remote.capable('getbundle'):
1440 if pullop.remote.capable('getbundle'):
1441 # TODO: get bundlecaps from remote
1441 # TODO: get bundlecaps from remote
1442 cg = pullop.remote.getbundle('pull', common=pullop.common,
1442 cg = pullop.remote.getbundle('pull', common=pullop.common,
1443 heads=pullop.heads or pullop.rheads)
1443 heads=pullop.heads or pullop.rheads)
1444 elif pullop.heads is None:
1444 elif pullop.heads is None:
1445 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1445 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1446 elif not pullop.remote.capable('changegroupsubset'):
1446 elif not pullop.remote.capable('changegroupsubset'):
1447 raise error.Abort(_("partial pull cannot be done because "
1447 raise error.Abort(_("partial pull cannot be done because "
1448 "other repository doesn't support "
1448 "other repository doesn't support "
1449 "changegroupsubset."))
1449 "changegroupsubset."))
1450 else:
1450 else:
1451 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1451 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1452 pullop.cgresult = cg.apply(pullop.repo, tr, 'pull', pullop.remote.url())
1452 pullop.cgresult = cg.apply(pullop.repo, tr, 'pull', pullop.remote.url())
1453
1453
1454 def _pullphase(pullop):
1454 def _pullphase(pullop):
1455 # Get remote phases data from remote
1455 # Get remote phases data from remote
1456 if 'phases' in pullop.stepsdone:
1456 if 'phases' in pullop.stepsdone:
1457 return
1457 return
1458 remotephases = pullop.remote.listkeys('phases')
1458 remotephases = pullop.remote.listkeys('phases')
1459 _pullapplyphases(pullop, remotephases)
1459 _pullapplyphases(pullop, remotephases)
1460
1460
1461 def _pullapplyphases(pullop, remotephases):
1461 def _pullapplyphases(pullop, remotephases):
1462 """apply phase movement from observed remote state"""
1462 """apply phase movement from observed remote state"""
1463 if 'phases' in pullop.stepsdone:
1463 if 'phases' in pullop.stepsdone:
1464 return
1464 return
1465 pullop.stepsdone.add('phases')
1465 pullop.stepsdone.add('phases')
1466 publishing = bool(remotephases.get('publishing', False))
1466 publishing = bool(remotephases.get('publishing', False))
1467 if remotephases and not publishing:
1467 if remotephases and not publishing:
1468 # remote is new and non-publishing
1468 # remote is new and non-publishing
1469 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1469 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1470 pullop.pulledsubset,
1470 pullop.pulledsubset,
1471 remotephases)
1471 remotephases)
1472 dheads = pullop.pulledsubset
1472 dheads = pullop.pulledsubset
1473 else:
1473 else:
1474 # Remote is old or publishing all common changesets
1474 # Remote is old or publishing all common changesets
1475 # should be seen as public
1475 # should be seen as public
1476 pheads = pullop.pulledsubset
1476 pheads = pullop.pulledsubset
1477 dheads = []
1477 dheads = []
1478 unfi = pullop.repo.unfiltered()
1478 unfi = pullop.repo.unfiltered()
1479 phase = unfi._phasecache.phase
1479 phase = unfi._phasecache.phase
1480 rev = unfi.changelog.nodemap.get
1480 rev = unfi.changelog.nodemap.get
1481 public = phases.public
1481 public = phases.public
1482 draft = phases.draft
1482 draft = phases.draft
1483
1483
1484 # exclude changesets already public locally and update the others
1484 # exclude changesets already public locally and update the others
1485 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1485 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1486 if pheads:
1486 if pheads:
1487 tr = pullop.gettransaction()
1487 tr = pullop.gettransaction()
1488 phases.advanceboundary(pullop.repo, tr, public, pheads)
1488 phases.advanceboundary(pullop.repo, tr, public, pheads)
1489
1489
1490 # exclude changesets already draft locally and update the others
1490 # exclude changesets already draft locally and update the others
1491 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1491 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1492 if dheads:
1492 if dheads:
1493 tr = pullop.gettransaction()
1493 tr = pullop.gettransaction()
1494 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1494 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1495
1495
1496 def _pullbookmarks(pullop):
1496 def _pullbookmarks(pullop):
1497 """process the remote bookmark information to update the local one"""
1497 """process the remote bookmark information to update the local one"""
1498 if 'bookmarks' in pullop.stepsdone:
1498 if 'bookmarks' in pullop.stepsdone:
1499 return
1499 return
1500 pullop.stepsdone.add('bookmarks')
1500 pullop.stepsdone.add('bookmarks')
1501 repo = pullop.repo
1501 repo = pullop.repo
1502 remotebookmarks = pullop.remotebookmarks
1502 remotebookmarks = pullop.remotebookmarks
1503 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1503 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1504 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1504 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1505 pullop.remote.url(),
1505 pullop.remote.url(),
1506 pullop.gettransaction,
1506 pullop.gettransaction,
1507 explicit=pullop.explicitbookmarks)
1507 explicit=pullop.explicitbookmarks)
1508
1508
1509 def _pullobsolete(pullop):
1509 def _pullobsolete(pullop):
1510 """utility function to pull obsolete markers from a remote
1510 """utility function to pull obsolete markers from a remote
1511
1511
1512 The `gettransaction` is function that return the pull transaction, creating
1512 The `gettransaction` is function that return the pull transaction, creating
1513 one if necessary. We return the transaction to inform the calling code that
1513 one if necessary. We return the transaction to inform the calling code that
1514 a new transaction have been created (when applicable).
1514 a new transaction have been created (when applicable).
1515
1515
1516 Exists mostly to allow overriding for experimentation purpose"""
1516 Exists mostly to allow overriding for experimentation purpose"""
1517 if 'obsmarkers' in pullop.stepsdone:
1517 if 'obsmarkers' in pullop.stepsdone:
1518 return
1518 return
1519 pullop.stepsdone.add('obsmarkers')
1519 pullop.stepsdone.add('obsmarkers')
1520 tr = None
1520 tr = None
1521 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1521 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1522 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1522 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1523 remoteobs = pullop.remote.listkeys('obsolete')
1523 remoteobs = pullop.remote.listkeys('obsolete')
1524 if 'dump0' in remoteobs:
1524 if 'dump0' in remoteobs:
1525 tr = pullop.gettransaction()
1525 tr = pullop.gettransaction()
1526 markers = []
1526 markers = []
1527 for key in sorted(remoteobs, reverse=True):
1527 for key in sorted(remoteobs, reverse=True):
1528 if key.startswith('dump'):
1528 if key.startswith('dump'):
1529 data = util.b85decode(remoteobs[key])
1529 data = util.b85decode(remoteobs[key])
1530 version, newmarks = obsolete._readmarkers(data)
1530 version, newmarks = obsolete._readmarkers(data)
1531 markers += newmarks
1531 markers += newmarks
1532 if markers:
1532 if markers:
1533 pullop.repo.obsstore.add(tr, markers)
1533 pullop.repo.obsstore.add(tr, markers)
1534 pullop.repo.invalidatevolatilesets()
1534 pullop.repo.invalidatevolatilesets()
1535 return tr
1535 return tr
1536
1536
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise the local bundle2 capabilities, url-quoted into a single cap
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
1543
1543
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated via the `getbundle2partsgenerator` decorator.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
# (also populated via the `getbundle2partsgenerator` decorator).
getbundle2partsmapping = {}
1551
1551
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # each step name may be registered exactly once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return func
    return register
1570
1570
def bundle2requested(bundlecaps):
    """report whether the given capabilities request a bundle2 stream"""
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1575
1575
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    :source: operation name passed down to the changegroup machinery,
    :heads:/:common: discovery results used to compute the outgoing set,
    :bundlecaps: capabilities advertised by the client (may be None),
    :kwargs: extra getbundle arguments; only honored on the bundle20 path.

    Returns an iterator over raw chunks (of varying sizes).
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 has no part system, so extra arguments cannot be honored
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    # decode the client's url-quoted bundle2 capability blob(s)
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return bundler.getchunks()
1615
1615
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # keep only versions both sides can produce/consume
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            # pick the most capable mutually-supported version
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        # only advertise 'version' when the client negotiated one above
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1643
1643
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested namespace
    for namespace in kwargs.get('listkeys', ()):
        keypart = bundler.newpart('listkeys')
        keypart.addparam('namespace', namespace)
        keypart.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1654
1654
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to everything reachable from the requested heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    relevant = sorted(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, relevant)
1666
1666
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Send only when changesets are being exchanged and the client
    # advertised support for this part.
    if kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps:
        outgoing = _computeoutgoing(repo, heads, common)
        bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1686
1686
def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """
    return {name: node for name, node in bookmod.listbinbookmarks(repo)}
1697
1697
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    currenthash = hashlib.sha1(''.join(sorted(current))).digest()
    # accepted forms: explicit override, exact head list, or head-list hash
    if their_heads == ['force']:
        return
    if their_heads == current:
        return
    if their_heads == ['hashed', currenthash]:
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1711
1711
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    :cg: either a bundle1 changegroup or a bundle2 unbundler,
    :heads: remote view of our heads at bundle-creation time (bundle1 only),
    :source:/:url: recorded in transaction hook arguments and messages.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                r = cg.apply(repo, tr, source, url)
        else:
            r = None
            try:
                # lazily take locks / open the transaction only when a
                # bundle2 part actually asks for one
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    # capture further ui output into an 'output' reply part
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # tag the exception so callers know it happened mid-bundle2
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage already-built reply parts so the client still
                    # receives the output produced before the failure
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1784
1784
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # Feature must be enabled client-side.
    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty and no specific heads were requested.
    if len(repo) or pullop.heads:
        return

    # Server must advertise support.
    if not remote.capable('clonebundles'):
        return

    manifest = remote._call('clonebundles')

    # Issuing the wire protocol command is enough to record the attempt.
    pullop.clonebundleattempted = True

    available = parseclonebundlesmanifest(repo, manifest)
    if not available:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    usable = filterclonebundleentries(repo, available)
    if not usable:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    best = sortclonebundleentries(repo.ui, usable)[0]

    url = best['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
        return

    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    if repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
1848
1848
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        # Skip blank lines.
        if not fields:
            continue

        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(
                        repo, value, externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # Unparseable specs are recorded verbatim only.
                    pass

        entries.append(attrs)

    return entries
1884
1884
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def _compatible(entry):
        # Decide whether a single entry is usable; emit a debug note
        # explaining why an entry is dropped.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                return False

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if _compatible(entry)]
1917
1917
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        # value: attribute dict for a manifest entry.
        # prefers: list of (attribute, preferred value) pairs, highest
        # priority first.
        self.value = value
        self.prefers = prefers

    def _onepref(self, avalue, bvalue, prefvalue):
        """Compare one attribute pair against one preferred value.

        Returns -1 when ``a`` wins, 1 when ``b`` wins, 0 when this
        preference cannot decide and the next one should be consulted.
        """
        # Special case for a missing attribute and b matches exactly.
        if avalue is None:
            if bvalue is not None and bvalue == prefvalue:
                return 1
            return 0

        # Special case for b missing attribute and a matches exactly.
        if bvalue is None:
            if avalue == prefvalue:
                return -1
            return 0

        # Same values should fall back to next attribute.
        if avalue == bvalue:
            return 0

        # Exact matches come first.
        if avalue == prefvalue:
            return -1
        if bvalue == prefvalue:
            return 1

        # Fall back to next attribute.
        return 0

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            verdict = self._onepref(self.value.get(prefkey),
                                    other.value.get(prefkey),
                                    prefvalue)
            if verdict:
                return verdict

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
1981
1981
def sortclonebundleentries(ui, entries):
    """Sort manifest entries by the user's ``ui.clonebundleprefers`` config.

    Returns a new list; the input order is preserved for entries that the
    preferences cannot distinguish (stable sort).
    """
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    # Each preference is "ATTR=VALUE"; split once into (attr, value) pairs.
    pairs = [pref.split('=', 1) for pref in prefers]

    wrapped = [clonebundleentry(entry, pairs) for entry in entries]
    wrapped.sort()
    return [w.value for w in wrapped]
1991
1991
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied; False when the
    fetch failed (a warning is printed and the caller decides the fallback).
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            # Dispatch on the bundle flavor: bundle2, stream clone, or
            # legacy changegroup.
            if isinstance(cg, bundle2.unbundle20):
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            elif isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                cg.apply(repo, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False
General Comments 0
You need to be logged in to leave comments. Login now