##// END OF EJS Templates
pushoperation: fix language issues in docstring
Nathan Goldbaum -
r28456:d9d51da7 default
parent child Browse files
Show More
@@ -1,1937 +1,1937
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import urllib
11 import urllib
12 import urllib2
12 import urllib2
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 )
18 )
19 from . import (
19 from . import (
20 base85,
20 base85,
21 bookmarks as bookmod,
21 bookmarks as bookmod,
22 bundle2,
22 bundle2,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 lock as lockmod,
26 lock as lockmod,
27 obsolete,
27 obsolete,
28 phases,
28 phases,
29 pushkey,
29 pushkey,
30 scmutil,
30 scmutil,
31 sslutil,
31 sslutil,
32 streamclone,
32 streamclone,
33 tags,
33 tags,
34 url as urlmod,
34 url as urlmod,
35 util,
35 util,
36 )
36 )
37
37
# Maps bundle compression human names to internal representation.
# Values are the two-letter compression identifiers stored in the HG10xx
# bundle header; None means "no compression".
_bundlespeccompressions = {'none': None,
                           'bzip2': 'BZ',
                           'gzip': 'GZ',
                          }

# Maps bundle version human names to changegroup versions.
# 'packed1' maps to the streaming clone format rather than a changegroup.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }
50
50
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into (version, {k: v}); parameter
        # keys and values are URI-encoded on the wire.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-qualified form: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in _bundlespeccompressions:
            # Compression only: pick the best changegroup version the repo's
            # storage format allows.
            compression = spec
            version = 'v1'
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            # Version only: packed1 streams are never compressed; everything
            # else historically defaults to bzip2.
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version, params
161
161
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the magic header of ``fh`` and return a matching unbundler.

    ``fname`` is used only for error messages ("stream" when None); when
    ``vfs`` is given, ``fname`` is joined to the vfs root for reporting.

    Returns a ``changegroup.cg1unpacker`` (HG10), ``bundle2`` unbundler
    (HG2x) or ``streamclone.streamcloneapplier`` (HGS1); raises
    ``error.Abort`` on anything unrecognized.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A headerless stream starting with a NUL byte is a raw, uncompressed
        # changegroup: re-inject the bytes we consumed and treat it as HG10UN.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 carries a two-byte compression code after the magic,
        # unless we already fixed one up above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
189
189
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a spec string such as ``bzip2-v1`` or ``none-packed1;...``;
    raises ``error.Abort`` when the bundle type, compression or changegroup
    version cannot be mapped to a known spec.
    """
    def speccompression(alg):
        # Reverse-map an internal compression identifier (e.g. 'BZ') back to
        # its human name (e.g. 'bzip2'); None when the algorithm is unknown.
        for k, v in _bundlespeccompressions.items():
            if v == alg:
                return k
        return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # The unpacker consumed the leading bytes of the BZ stream as
            # part of the header; the compression is still bzip2.
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            compalg = b.params['Compression']
            comp = speccompression(compalg)
            if not comp:
                # Bug fix: report the algorithm we failed to recognize, not
                # the None result of the failed reverse lookup.
                raise error.Abort(_('unknown compression algorithm: %s')
                                  % compalg)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                # Both 01 and 02 changegroups inside bundle2 are expressed
                # as the "v2" bundlespec.
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urllib.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
242
242
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    When ``markers`` is empty, no part is added and None is returned.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    # Pick a marker format understood by both ends.
    supportedversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(supportedversions)
    if version is None:
        raise ValueError('bundler does not support common obsmarker format')
    encoded = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=encoded)
257
257
258 def _canusebundle2(op):
258 def _canusebundle2(op):
259 """return true if a pull/push can use bundle2
259 """return true if a pull/push can use bundle2
260
260
261 Feel free to nuke this function when we drop the experimental option"""
261 Feel free to nuke this function when we drop the experimental option"""
262 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
262 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
263 and op.remote.capable('bundle2'))
263 and op.remote.capable('bundle2'))
264
264
265
265
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed (iterable of names)
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager (set once a local lock has been acquired)
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no revs were targeted for push, so all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
368
368
# mapping of message used when pushing bookmark
# Each action maps to an (ok message, failure message) pair; both expect the
# bookmark name as their single %s argument.
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
377
377
378
378
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the pushoperation object; its ``cgresult`` attribute holds the
    integer changegroup push result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        # Pushing over the filesystem: refuse early if the destination repo
        # cannot understand our storage requirements.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # local writes (e.g. phase updates from a bundle2 reply) happen
            # inside a single transaction, closed only on overall success
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push path: we must lock the remote repo ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # each step below checks pushop.stepsdone and is a no-op when
            # the bundle2 exchange already covered it
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # release() without close() rolls the transaction back
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
463
463
464 # list of steps to perform discovery before push
464 # list of steps to perform discovery before push
465 pushdiscoveryorder = []
465 pushdiscoveryorder = []
466
466
467 # Mapping between step name and function
467 # Mapping between step name and function
468 #
468 #
469 # This exists to help extensions wrap steps if necessary
469 # This exists to help extensions wrap steps if necessary
470 pushdiscoverymapping = {}
470 pushdiscoverymapping = {}
471
471
472 def pushdiscovery(stepname):
472 def pushdiscovery(stepname):
473 """decorator for function performing discovery before push
473 """decorator for function performing discovery before push
474
474
475 The function is added to the step -> function mapping and appended to the
475 The function is added to the step -> function mapping and appended to the
476 list of steps. Beware that decorated function will be added in order (this
476 list of steps. Beware that decorated function will be added in order (this
477 may matter).
477 may matter).
478
478
479 You can only use this decorator for a new step, if you want to wrap a step
479 You can only use this decorator for a new step, if you want to wrap a step
480 from an extension, change the pushdiscovery dictionary directly."""
480 from an extension, change the pushdiscovery dictionary directly."""
481 def dec(func):
481 def dec(func):
482 assert stepname not in pushdiscoverymapping
482 assert stepname not in pushdiscoverymapping
483 pushdiscoverymapping[stepname] = func
483 pushdiscoverymapping[stepname] = func
484 pushdiscoveryorder.append(stepname)
484 pushdiscoveryorder.append(stepname)
485 return func
485 return func
486 return dec
486 return dec
487
487
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
493
493
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    # what the remote already has, plus its current heads
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # what we have that the remote is missing
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
506
506
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    # split remote phase data into publishing heads and draft roots
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing server: only consider what is already public
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        # nothing new to push: the fallback computation is also the
        # expected outcome
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to move if the push succeeds / if it fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
555
555
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """find the obsolescence markers relevant to the pushed changesets"""
    repo = pushop.repo
    # only compute markers when exchange is enabled, we actually have
    # markers, and the remote advertises obsolescence support
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
566
566
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """figure out which bookmarks to add, update or delete on the remote

    Populates ``pushop.outbookmarks`` with (name, old-remote-id, new-id)
    entries and sets ``pushop.bkresult`` to 2 when an explicitly requested
    bookmark cannot be found on either side."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict to bookmarks on ancestors of the pushed revisions
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmark names the user explicitly asked to push, fully expanded
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push the move
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        # any name left in 'explicit' was requested but matched nothing
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
618
618
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before push

    Returns False when there is nothing to push.  Aborts when a non-forced
    push includes obsolete or troubled changesets, and runs the head checks
    from the discovery module."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
649
649
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps.  Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    # (fixed docstring: "attack the … dictionary" was a typo; the wording now
    # matches the pushdiscovery decorator above)
    def dec(func):
        # a step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            # allow callers to splice a step at a specific position
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
676
676
677 def _pushb2ctxcheckheads(pushop, bundler):
677 def _pushb2ctxcheckheads(pushop, bundler):
678 """Generate race condition checking parts
678 """Generate race condition checking parts
679
679
680 Exists as an independent function to aid extensions
680 Exists as an independent function to aid extensions
681 """
681 """
682 if not pushop.force:
682 if not pushop.force:
683 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
683 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
684
684
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version with the remote
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # keep only versions both sides can produce/consume
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
730
730
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one 'pushkey' part per outdated phase head and returns a reply
    handler that reports pushkey failures to the user."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # PEP 8 idiom: 'not in' instead of 'not ... in' (matches the check in
    # _pushb2bookmarks)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used to map server replies back to heads
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # warn about any head the server refused or ignored
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
771
771
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle when relevant"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    versions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(versions) is None:
        # no marker format in common with the remote, nothing to send
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
783
783
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples used to decode server replies
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        # one pushkey part per bookmark change
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # report success or failure of each bookmark update to the user
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
835
835
836
836
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator; collect their reply handlers
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except bundle2.AbortFromPart as exc:
        # the server aborted while processing one of our parts
        pushop.ui.status(_('remote: %s\n') % exc)
        raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate to the failure callback registered by the part generator
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
882
882
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) push path.  Stores the remote's return value in
    ``pushop.cgresult``."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
931
931
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # then we may be in the issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # remote publishes everything: all common heads become public
            _localphasemove(pushop, cheads)
        else: # publish = False
            # only heads the remote reports as public become public locally;
            # the remaining common heads are moved to (at most) draft
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # A transaction manager exists: the repo is locked, so the phase
        # boundary can safely be advanced.
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # The repo is not locked, so no phase may be changed. Let the user
    # know whenever a move would actually have happened.
    needmove = [node for node in nodes if phase < pushop.repo[node].phase()]
    if needmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n')
                         % phases.phasenames[phase])
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort so that we end with the dump0 key
    results = [pushop.remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        pushop.repo.ui.warn(_('failed to push some obsolete markers!\n'))
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')

    for name, old, new in pushop.outbookmarks:
        # pick the message pair matching the kind of change being pushed
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if pushop.remote.pushkey('bookmarks', name, old, new):
            pushop.ui.status(bookmsgmap[action][0] % name)
        else:
            pushop.ui.warn(bookmsgmap[action][1] % name)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set later, by the pull function)
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # whether the bundle2 protocol may be used for this pull
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
class transactionmanager(object):
    """Manage the life cycle of a single transaction.

    The transaction is created lazily, on first request, and the
    appropriate hooks are invoked when it is closed."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr:
            return self._tr
        trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
        tr = self.repo.transaction(trname)
        tr.hookargs['source'] = self.source
        tr.hookargs['url'] = self.url
        self._tr = tr
        return tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # local-to-local pull: refuse to pull from a source whose
        # requirements the destination does not understand
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # the remaining steps record themselves in pullop.stepsdone, so
        # whatever bundle2 already handled above is not redone here
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
# Ordered list of discovery step names to perform before pull (the name is
# resolved to a function through pulldiscoverymapping)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is recorded in the step -> function mapping and
    its step name is appended to the ordered step list. Beware that the
    decorated functions are registered in definition order (this may matter).

    Only use this decorator for a new step; to wrap a step from an
    extension, change the pulldiscovery dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is None:
        if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
            # all known bundle2 servers now support listkeys, so bundle2
            # will fetch the bookmark data for us -- but stay nice with
            # new implementations and leave them alone here.
            return
        pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally (possibly hidden): treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every "unknown" remote head was actually known locally, so
            # there is nothing left to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # a marker format is shared with the remote: fetch obsolescence
            # markers as part of the bundle too
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1352 def _pullbundle2extraprepare(pullop, kwargs):
1352 def _pullbundle2extraprepare(pullop, kwargs):
1353 """hook function so that extensions can extend the getbundle call"""
1353 """hook function so that extensions can extend the getbundle call"""
1354 pass
1354 pass
1355
1355
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # old server without getbundle: pull everything it has
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        # a subset was requested but the old server cannot serve one
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        # old server that can at least serve the requested subset
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
def _pullphase(pullop):
    """Fetch phase information from the remote and apply it locally.

    Skipped entirely when the 'phases' step was already handled (e.g. by a
    bundle2 part during the main pull).
    """
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1395
1395
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is a raw listkeys-style mapping from the remote's
    'phases' namespace.  Boundaries are only ever advanced here (via
    phases.advanceboundary), once for public heads and once for draft heads.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind frequently-used lookups to locals for the comprehensions below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1430
1430
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' not in pullop.stepsdone:
        pullop.stepsdone.add('bookmarks')
        repo = pullop.repo
        bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                                 pullop.remote.url(),
                                 pullop.gettransaction,
                                 explicit=pullop.explicitbookmarks)
1442
1442
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the calling
    code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' presence signals the remote actually has marker data
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            # keys are processed newest-first ('dumpN' sorted in reverse)
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr
1470
1470
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    # advertise HG20 plus the URL-quoted bundle2 capability blob
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1477
1477
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated by the getbundle2partsgenerator decorator below.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1485
1485
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is recorded in the step -> function mapping and
    inserted into the list of steps (at the end unless ``idx`` is given).
    Beware that decorated functions will be added in order (this may matter).

    You can only use this decorator for new steps; to wrap a step from an
    extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        position = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(position, stepname)
        return func
    return dec
1504
1504
def bundle2requested(bundlecaps):
    """Tell whether the client capabilities ask for a bundle2 stream.

    Returns True when any advertised capability starts with 'HG2';
    a missing (None) capability set means bundle1.
    """
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith('HG2'):
            return True
    return False
1509
1509
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types are available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle1 supports no extra arguments at all
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    # decode the client's advertised bundle2 capability blob(s)
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run each registered part generator in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1554
1554
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # negotiate the highest changegroup version supported by both ends
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        # advisory changeset count, used for progress reporting on the peer
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1583
1583
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1594
1594
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to all ancestors of the requested heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    relevant = repo.obsstore.relevantmarkers(subset)
    buildobsmarkerspart(bundler, sorted(relevant))
1606
1606
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    # only emit the part when at least one (node, fnode) pair was found
    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1649
1649
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    # accept a forced push, an exact head match, or a matching head hash
    if (their_heads == ['force'] or their_heads == heads
            or their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1663
1663
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # a 'params' attribute marks cg as a bundle2 stream
            r = None
            try:
                def gettransaction():
                    # lazily take locks and open the transaction on first use
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # capture server output so it can be relayed in the
                        # reply bundle
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # flag the failure so callers know bundle2 was in progress
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage already-built output parts onto the exception
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # plain bundle1 changegroup: apply under the store lock only
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1732
1732
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    Bails out silently unless this is a full clone of an empty local repo,
    clone bundles are enabled, and the remote advertises the capability.
    """
    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # a partial pull (explicit heads) is not a clone
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # best candidate first after sorting
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
1796
1796
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    pass

        entries.append(attrs)

    return entries
1832
1832
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def compatible(entry):
        # Reject entries whose bundle spec we can't parse or don't support.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                return False

        # Reject entries requiring SNI when this client lacks SNI support.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if compatible(entry)]
1865
1865
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by user preferences.

    ``ui.clonebundleprefers`` holds ``KEY=VALUE`` preference strings,
    highest priority first. Entries matching an earlier preference sort
    before those that don't; ties fall through to the next preference
    and finally to manifest order (the sort is stable).

    Returns a new sorted list; ``entries`` is not modified.
    """
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Pairwise comparator over entry dicts. The "one side missing the
    # attribute" special cases make this inherently relational, so it
    # cannot be expressed as a simple key function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    # The ``cmp`` keyword to sorted() was removed in Python 3;
    # functools.cmp_to_key exists on both 2.7 and 3.x, so use it for
    # forward compatibility. Imported locally because this module's
    # top-level imports are outside this change.
    from functools import cmp_to_key
    return sorted(entries, key=cmp_to_key(compareentry))
1909
1909
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url``, applies it inside a repo lock and a ``bundleurl``
    transaction, and returns True on success. On fetch errors a warning
    is emitted via ``ui.warn`` and False is returned; the transaction is
    released (aborted) because it was never closed.
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # Dispatch on the payload type: bundle2, stream clone
                # data, or a legacy changegroup.
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                # Commit the transaction only after a successful apply.
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                # NOTE(review): URLError.reason is not always an indexable
                # sequence; ``e.reason[1]`` may itself raise -- confirm.
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            # Fetch failed: tr.close() was never reached, so the
            # tr.release() in the finally clause aborts the transaction.
            return False
        finally:
            tr.release()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now