##// END OF EJS Templates
exchange: add `_getbookmarks()` function...
Stanislau Hlebik -
r30483:8491845a default
parent child Browse files
Show More
@@ -1,1951 +1,1962 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 base85,
19 base85,
20 bookmarks as bookmod,
20 bookmarks as bookmod,
21 bundle2,
21 bundle2,
22 changegroup,
22 changegroup,
23 discovery,
23 discovery,
24 error,
24 error,
25 lock as lockmod,
25 lock as lockmod,
26 obsolete,
26 obsolete,
27 phases,
27 phases,
28 pushkey,
28 pushkey,
29 scmutil,
29 scmutil,
30 sslutil,
30 sslutil,
31 streamclone,
31 streamclone,
32 tags,
32 tags,
33 url as urlmod,
33 url as urlmod,
34 util,
34 util,
35 )
35 )
36
36
37 urlerr = util.urlerr
37 urlerr = util.urlerr
38 urlreq = util.urlreq
38 urlreq = util.urlreq
39
39
40 # Maps bundle version human names to changegroup versions.
40 # Maps bundle version human names to changegroup versions.
41 _bundlespeccgversions = {'v1': '01',
41 _bundlespeccgversions = {'v1': '01',
42 'v2': '02',
42 'v2': '02',
43 'packed1': 's1',
43 'packed1': 's1',
44 'bundle2': '02', #legacy
44 'bundle2': '02', #legacy
45 }
45 }
46
46
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    A bundle specification denotes a well-defined bundle/exchange format
    whose meaning must stay stable over time, so that bundles produced by
    newer Mercurial versions remain readable by older ones.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    where <compression> is one of the supported compression formats and
    <type> is (currently) a version string. Everything after a ";" is
    interpreted as URI-encoded, ";"-delimited key=value pairs.

    If ``strict`` is True (the default) the <compression> prefix is
    mandatory; otherwise it may be omitted.

    If ``externalnames`` is False (the default) the human-centric names
    are converted to their internal representation before being returned.

    Returns a 3-tuple of (compression, version, parameters). Compression
    is ``None`` when not in strict mode and no compression was given.

    Raises ``InvalidBundleSpecification`` when the specification is not
    syntactically well formed and ``UnsupportedBundleSpecification`` when
    the compression or bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "version;k0=v0;k1=v1" into (version, {k: v}); parameter
        # keys and values are URI-decoded.
        if ';' not in s:
            return s, {}

        version, paramstr = s.split(';', 1)
        params = {}
        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)
            key, value = p.split('=', 1)
            params[urlreq.unquote(key)] = urlreq.unquote(value)
        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully qualified form: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Bare compression name or bare version: defaults are assumed for
        # the missing half. Only legal outside strict mode.
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v2' if 'generaldelta' in repo.requirements else 'v1'
        elif spec in _bundlespeccgversions:
            compression = 'none' if spec == 'packed1' else 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Translate human names to internal identifiers.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]

    return compression, version, params
158
158
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from *fh*.

    *fname* is used for error reporting (and joined against *vfs* when
    given); when empty, the input is treated as an anonymous stream and a
    headerless changegroup is tolerated.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        # A stream lacking the 'HG' magic but starting with a NUL byte is
        # assumed to be a raw, headerless cg1 changegroup: reattach the
        # consumed bytes and treat it as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # cg1: the 2-byte compression marker follows the magic unless we
        # already decided on 'UN' above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
186
186
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec string such as ``'bzip2-v1'`` or
    ``'none-packed1;requirements%3D...'``. Raises ``error.Abort`` when the
    bundle type, compression, or changegroup version cannot be mapped to a
    known bundlespec.
    """
    def speccompression(alg):
        # Map an internal compression identifier to its bundlespec name,
        # or None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                # Bug fix: report the unrecognized algorithm name; ``comp``
                # is always None on this path, so formatting it produced
                # the useless message "unknown compression algorithm: None".
                raise error.Abort(_('unknown compression algorithm: %s')
                                  % b.params['Compression'])
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
239
239
def buildobsmarkerspart(bundler, markers):
    """Add an 'obsmarkers' part carrying <markers> to the bundler.

    Returns the new part, or None when markers is empty (no part is
    created in that case).
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None

    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
254
254
def _computeoutgoing(repo, heads, common):
    """Compute which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # drop nodes the local changelog does not know about
        common = [n for n in common if cl.hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
273
273
274 def _forcebundle1(op):
274 def _forcebundle1(op):
275 """return true if a pull/push must use bundle1
275 """return true if a pull/push must use bundle1
276
276
277 This function is used to allow testing of the older bundle version"""
277 This function is used to allow testing of the older bundle version"""
278 ui = op.repo.ui
278 ui = op.repo.ui
279 forcebundle1 = False
279 forcebundle1 = False
280 # The goal is this config is to allow developer to choose the bundle
280 # The goal is this config is to allow developer to choose the bundle
281 # version used during exchanged. This is especially handy during test.
281 # version used during exchanged. This is especially handy during test.
282 # Value is a list of bundle version to be picked from, highest version
282 # Value is a list of bundle version to be picked from, highest version
283 # should be used.
283 # should be used.
284 #
284 #
285 # developer config: devel.legacy.exchange
285 # developer config: devel.legacy.exchange
286 exchange = ui.configlist('devel', 'legacy.exchange')
286 exchange = ui.configlist('devel', 'legacy.exchange')
287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 return forcebundle1 or not op.remote.capable('bundle2')
288 return forcebundle1 or not op.remote.capable('bundle2')
289
289
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired? (None until the push attempts it)
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through
        # bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager (set only when the local repo was locked)
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded, so the
        # planned future heads became real; otherwise fall back.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of (success, failure) messages used when pushing a bookmark,
    # keyed by the kind of bookmark operation
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
401
401
402
402
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the pushoperation object; its ``cgresult`` attribute carries
    the integer changegroup result:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        # local-to-local push: the destination must support every feature
        # the source repo requires
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction manager is only useful when we may write local
            # state (e.g. phase updates pushed back by the server)
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if not _forcebundle1(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # release in reverse acquisition order: transaction, lock, wlock
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
487
487
# Ordered list of discovery step names to perform before push; steps run
# in this order (see _pushdiscovery).
pushdiscoveryorder = []

# Mapping between step name and the function implementing the step.
#
# This exists to help extensions wrap steps if necessary.
pushdiscoverymapping = {}
495
495
def pushdiscovery(stepname):
    """decorator registering a function as a discovery step for push

    The decorated function is stored under ``stepname`` in
    ``pushdiscoverymapping`` and ``stepname`` is appended to
    ``pushdiscoveryorder``. Beware that decorated functions are registered
    in definition order (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # each step name may be introduced only once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
511
511
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
517
517
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # first figure out what the remote side already has
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # then compute what is outgoing relative to that common base
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo,
                                                   pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
530
530
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    # pheads: heads known public on the remote
    # droots: roots of the remote draft set
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    # %%ln survives the %-formatting below as the %ln revset placeholder
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to move if the push succeeds / if it falls back
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
579
579
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push"""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
590
590
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks need to be pushed

    Fills ``pushop.outbookmarks`` with (name, remote-id, local-id) triples
    and sets ``pushop.bkresult`` to 2 when an explicitly requested bookmark
    exists neither locally nor remotely.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # restrict updates to bookmarks on pushed revs when revs were given
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user asked for on the command line
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    # srchex=hex: local ids are run through hex() for the comparison —
    # presumably so both sides compare as hex strings; verify in bookmod
    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that simply advanced locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    # anything still in 'explicit' matched nothing on either side
    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
642
642
def _pushcheckoutgoing(pushop):
    """validate the outgoing set before pushing

    Returns False when there is nothing to push. Aborts (unless --force)
    when the outgoing heads contain obsolete or troubled changesets, and
    runs the standard head checks via discovery.checkheads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
673
673
# List of names of steps to perform for an outgoing bundle2, order matters.
# Populated by the b2partsgenerator() decorator; _pushbundle2() walks it.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
681
681
def b2partsgenerator(stepname, idx=None):
    """decorator registering a function as a bundle2 part generator

    The decorated function is stored under ``stepname`` in
    ``b2partsgenmapping`` and ``stepname`` is appended to
    ``b2partsgenorder`` — or inserted at position ``idx`` when given.
    Beware that decorated functions are registered in definition order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may be introduced only once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
700
700
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if pushop.force:
        # a forced push skips the remote-head race check
        return
    bundler.newpart('check:heads', data=iter(pushop.remoteheads))
708
708
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version with the remote
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
749
749
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one 'pushkey' part per head in ``pushop.outdatedphases`` asking
    the remote to move it from draft to public. Returns a reply handler
    that reports heads the server refused or ignored.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic 'not in' membership test, consistent with _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs so failures/replies can be mapped back to heads
    part2node = []

    def handlefailure(pushop, exc):
        # translate a PushkeyFailed for one of our parts into an Abort
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about any phase update the server rejected or ignored"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
790
790
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle when possible"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format shared with the remote: leave the step undone
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
802
802
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one 'pushkey' part per entry of ``pushop.outbookmarks`` and
    returns a reply handler that reports per-bookmark success or failure.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) so replies map back to bookmarks
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # translate a PushkeyFailed for one of our parts into an Abort
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old -> new bookmark; empty new -> deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
854
854
855
855
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback: let the server send data back inside our transaction
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # each registered generator may add parts and return a reply handler
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                    stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
    except bundle2.AbortFromPart as exc:
        # server asked us to abort; relay its message and hint
        pushop.ui.status(_('remote: %s\n') % exc)
        raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        # dispatch to the per-part failure callback when one was registered
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
902
902
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) changegroup push. Stores the remote's result in
    ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
949
949
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Exchanges phase data with the remote via the 'phases' listkeys/pushkey
    namespaces, moving local phases to match the remote and pushing any
    locally-known draft->public transitions back to the remote.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1005
1005
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    If no transaction is held (repo not locked), no phase is changed;
    instead the user is informed that the move was skipped.
    """
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
1022
1022
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Markers are sent through the 'obsolete' pushkey namespace, split into
    escaped chunks by obsolete._pushkeyescape. Warns if any chunk fails.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
1041
1041
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Each outgoing bookmark is pushed through the 'bookmarks' pushkey
    namespace; an empty old value means export, an empty new value means
    delete. Status/warn messages come from the module-level bookmsgmap.
    """
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1063
1063
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless configuration or remote capabilities force bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1134
1134
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1164
1164
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1220
1220
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1228
1228
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        # each step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
1244
1244
def _pulldiscovery(pullop):
    """Run all discovery steps

    Steps registered via the ``pulldiscovery`` decorator are executed in
    registration order.
    """
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)
1250
1250
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1264
1264
1265
1265
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1303
1303
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1371
1371
1372 def _pullbundle2extraprepare(pullop, kwargs):
1372 def _pullbundle2extraprepare(pullop, kwargs):
1373 """hook function so that extensions can extend the getbundle call"""
1373 """hook function so that extensions can extend the getbundle call"""
1374 pass
1374 pass
1375
1375
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1408
1408
def _pullphase(pullop):
    """Fetch phase data from the remote via pushkey and apply it locally."""
    if 'phases' in pullop.stepsdone:
        return
    # _pullapplyphases records the 'phases' step itself.
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1415
1415
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the raw pushkey 'phases' namespace mapping from
    the remote.  Local phases only ever advance (move toward public).
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups to locals for the list comprehensions below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1450
1450
def _pullbookmarks(pullop):
    """Update local bookmarks from the remote bookmark data on ``pullop``."""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    bookmod.updatefromremote(localrepo.ui, localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1462
1462
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # markers are exchanged through the pushkey 'obsolete' namespace,
        # split across 'dump0', 'dump1', ... entries, base85-encoded
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1490
1490
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    blob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return {'HG20', 'bundle2=' + urlreq.quote(blob)}
1497
1497
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
# (populated via the getbundle2partsgenerator decorator)
getbundle2partsmapping = {}
1505
1505
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is registered in the step -> function mapping
    and its step name appended to the ordered step list (or inserted at
    ``idx`` when given).  Beware that decorated functions will be added
    in definition order (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return func
    return register
1524
1524
def bundle2requested(bundlecaps):
    """Tell whether the client capabilities ask for a bundle2 stream.

    A request is for bundle2 when at least one advertised capability
    starts with 'HG2'.  ``None`` (no capabilities) means bundle1.
    """
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1529
1529
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        # bundle10 is changegroup-only; extra arguments cannot be honored
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    # decode the client's bundle2 capabilities from the 'bundle2=' entry
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return bundler.getchunks()
1569
1569
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # negotiate: intersect client-advertised versions with ours
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        # advisory changeset count, used for client progress reporting
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1597
1597
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested pushkey namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1608
1608
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to any ancestor of the requested heads
    ancestors = [ctx.node() for ctx in repo.set('::%ln', heads)]
    relevant = sorted(repo.obsstore.relevantmarkers(ancestors))
    buildobsmarkerspart(bundler, relevant)
1620
1620
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1663
1663
def _getbookmarks(repo, **kwargs):
    """Return a mapping of bookmark name to binary node.

    This function is primarily used to generate the `bookmarks` bundle2
    part.  It is a separate function so it is easy to wrap in extensions;
    ``kwargs`` is accepted so extensions can introduce new parameters
    without changing the signature.
    """
    return {book: node for book, node in bookmod.listbinbookmarks(repo)}
1674
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    hashedheads = hashlib.sha1(''.join(sorted(currentheads))).digest()
    acceptable = (their_heads == ['force']
                  or their_heads == currentheads
                  or their_heads == ['hashed', hashedheads])
    if not acceptable:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1677
1688
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # abort early on push race, before taking any lock
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream (has a 'params' attribute)
            r = None
            try:
                def gettransaction():
                    # lazily take wlock, lock and open the transaction;
                    # results are stashed in lockandtr for the outer finally
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # route captured ui output into the reply bundle
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # tag the exception so callers know it happened mid-bundle2
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # legacy bundle1 path: plain changegroup application
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1746
1757
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # feature can be disabled client-side
    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # a partial clone (explicit heads) cannot use a pre-generated bundle
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
1810
1821
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # malformed/unknown specs are simply left unexpanded
                    pass

        entries.append(attrs)

    return entries
1846
1857
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def compatible(entry):
        # Return True if this client looks able to apply ``entry``;
        # emit a debug message explaining any rejection.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                return False

        # Entries requiring TLS SNI are unusable on builds without SNI.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if compatible(entry)]
1879
1890
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by user preferences.

    ``ui.clonebundleprefers`` is a list of ``KEY=VALUE`` strings. Entries
    whose attribute values match earlier preferences sort first. Entries
    that cannot be distinguished by any preference keep their original
    relative order (the sort is stable). Returns a new list; ``entries``
    is not modified.
    """
    # Imported locally to keep this fix self-contained; available on both
    # Python 2.7 and Python 3.
    import functools

    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    # ``sorted(..., cmp=...)`` only exists on Python 2; cmp_to_key gives
    # identical ordering and also works on Python 3.
    return sorted(entries, key=functools.cmp_to_key(compareentry))
1923
1934
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied successfully;
    False otherwise (fetch errors are reported with a warning).
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            fileobj = urlmod.open(ui, url)
            bundle = readbundle(ui, fileobj, 'stream')

            # Dispatch on the concrete bundle format we received.
            if isinstance(bundle, bundle2.unbundle20):
                bundle2.processbundle(repo, bundle, lambda: tr)
            elif isinstance(bundle, streamclone.streamcloneapplier):
                bundle.apply(repo)
            else:
                bundle.apply(repo, 'clonebundles', url)
            tr.close()
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason[1])
        finally:
            # Release after close() is a no-op on success; on failure it
            # rolls the transaction back.
            tr.release()

        return False
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now