##// END OF EJS Templates
bundle2: rename the _canusebundle2 method to _forcebundle1...
Pierre-Yves David -
r29682:2db085d5 default
parent child Browse files
Show More
@@ -1,1932 +1,1932 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 base85,
19 base85,
20 bookmarks as bookmod,
20 bookmarks as bookmod,
21 bundle2,
21 bundle2,
22 changegroup,
22 changegroup,
23 discovery,
23 discovery,
24 error,
24 error,
25 lock as lockmod,
25 lock as lockmod,
26 obsolete,
26 obsolete,
27 phases,
27 phases,
28 pushkey,
28 pushkey,
29 scmutil,
29 scmutil,
30 sslutil,
30 sslutil,
31 streamclone,
31 streamclone,
32 tags,
32 tags,
33 url as urlmod,
33 url as urlmod,
34 util,
34 util,
35 )
35 )
36
36
37 urlerr = util.urlerr
37 urlerr = util.urlerr
38 urlreq = util.urlreq
38 urlreq = util.urlreq
39
39
40 # Maps bundle compression human names to internal representation.
40 # Maps bundle compression human names to internal representation.
41 _bundlespeccompressions = {'none': None,
41 _bundlespeccompressions = {'none': None,
42 'bzip2': 'BZ',
42 'bzip2': 'BZ',
43 'gzip': 'GZ',
43 'gzip': 'GZ',
44 }
44 }
45
45
46 # Maps bundle version human names to changegroup versions.
46 # Maps bundle version human names to changegroup versions.
47 _bundlespeccgversions = {'v1': '01',
47 _bundlespeccgversions = {'v1': '01',
48 'v2': '02',
48 'v2': '02',
49 'packed1': 's1',
49 'packed1': 's1',
50 'bundle2': '02', #legacy
50 'bundle2': '02', #legacy
51 }
51 }
52
52
53 def parsebundlespec(repo, spec, strict=True, externalnames=False):
53 def parsebundlespec(repo, spec, strict=True, externalnames=False):
54 """Parse a bundle string specification into parts.
54 """Parse a bundle string specification into parts.
55
55
56 Bundle specifications denote a well-defined bundle/exchange format.
56 Bundle specifications denote a well-defined bundle/exchange format.
57 The content of a given specification should not change over time in
57 The content of a given specification should not change over time in
58 order to ensure that bundles produced by a newer version of Mercurial are
58 order to ensure that bundles produced by a newer version of Mercurial are
59 readable from an older version.
59 readable from an older version.
60
60
61 The string currently has the form:
61 The string currently has the form:
62
62
63 <compression>-<type>[;<parameter0>[;<parameter1>]]
63 <compression>-<type>[;<parameter0>[;<parameter1>]]
64
64
65 Where <compression> is one of the supported compression formats
65 Where <compression> is one of the supported compression formats
66 and <type> is (currently) a version string. A ";" can follow the type and
66 and <type> is (currently) a version string. A ";" can follow the type and
67 all text afterwards is interpretted as URI encoded, ";" delimited key=value
67 all text afterwards is interpretted as URI encoded, ";" delimited key=value
68 pairs.
68 pairs.
69
69
70 If ``strict`` is True (the default) <compression> is required. Otherwise,
70 If ``strict`` is True (the default) <compression> is required. Otherwise,
71 it is optional.
71 it is optional.
72
72
73 If ``externalnames`` is False (the default), the human-centric names will
73 If ``externalnames`` is False (the default), the human-centric names will
74 be converted to their internal representation.
74 be converted to their internal representation.
75
75
76 Returns a 3-tuple of (compression, version, parameters). Compression will
76 Returns a 3-tuple of (compression, version, parameters). Compression will
77 be ``None`` if not in strict mode and a compression isn't defined.
77 be ``None`` if not in strict mode and a compression isn't defined.
78
78
79 An ``InvalidBundleSpecification`` is raised when the specification is
79 An ``InvalidBundleSpecification`` is raised when the specification is
80 not syntactically well formed.
80 not syntactically well formed.
81
81
82 An ``UnsupportedBundleSpecification`` is raised when the compression or
82 An ``UnsupportedBundleSpecification`` is raised when the compression or
83 bundle type/version is not recognized.
83 bundle type/version is not recognized.
84
84
85 Note: this function will likely eventually return a more complex data
85 Note: this function will likely eventually return a more complex data
86 structure, including bundle2 part information.
86 structure, including bundle2 part information.
87 """
87 """
88 def parseparams(s):
88 def parseparams(s):
89 if ';' not in s:
89 if ';' not in s:
90 return s, {}
90 return s, {}
91
91
92 params = {}
92 params = {}
93 version, paramstr = s.split(';', 1)
93 version, paramstr = s.split(';', 1)
94
94
95 for p in paramstr.split(';'):
95 for p in paramstr.split(';'):
96 if '=' not in p:
96 if '=' not in p:
97 raise error.InvalidBundleSpecification(
97 raise error.InvalidBundleSpecification(
98 _('invalid bundle specification: '
98 _('invalid bundle specification: '
99 'missing "=" in parameter: %s') % p)
99 'missing "=" in parameter: %s') % p)
100
100
101 key, value = p.split('=', 1)
101 key, value = p.split('=', 1)
102 key = urlreq.unquote(key)
102 key = urlreq.unquote(key)
103 value = urlreq.unquote(value)
103 value = urlreq.unquote(value)
104 params[key] = value
104 params[key] = value
105
105
106 return version, params
106 return version, params
107
107
108
108
109 if strict and '-' not in spec:
109 if strict and '-' not in spec:
110 raise error.InvalidBundleSpecification(
110 raise error.InvalidBundleSpecification(
111 _('invalid bundle specification; '
111 _('invalid bundle specification; '
112 'must be prefixed with compression: %s') % spec)
112 'must be prefixed with compression: %s') % spec)
113
113
114 if '-' in spec:
114 if '-' in spec:
115 compression, version = spec.split('-', 1)
115 compression, version = spec.split('-', 1)
116
116
117 if compression not in _bundlespeccompressions:
117 if compression not in _bundlespeccompressions:
118 raise error.UnsupportedBundleSpecification(
118 raise error.UnsupportedBundleSpecification(
119 _('%s compression is not supported') % compression)
119 _('%s compression is not supported') % compression)
120
120
121 version, params = parseparams(version)
121 version, params = parseparams(version)
122
122
123 if version not in _bundlespeccgversions:
123 if version not in _bundlespeccgversions:
124 raise error.UnsupportedBundleSpecification(
124 raise error.UnsupportedBundleSpecification(
125 _('%s is not a recognized bundle version') % version)
125 _('%s is not a recognized bundle version') % version)
126 else:
126 else:
127 # Value could be just the compression or just the version, in which
127 # Value could be just the compression or just the version, in which
128 # case some defaults are assumed (but only when not in strict mode).
128 # case some defaults are assumed (but only when not in strict mode).
129 assert not strict
129 assert not strict
130
130
131 spec, params = parseparams(spec)
131 spec, params = parseparams(spec)
132
132
133 if spec in _bundlespeccompressions:
133 if spec in _bundlespeccompressions:
134 compression = spec
134 compression = spec
135 version = 'v1'
135 version = 'v1'
136 if 'generaldelta' in repo.requirements:
136 if 'generaldelta' in repo.requirements:
137 version = 'v2'
137 version = 'v2'
138 elif spec in _bundlespeccgversions:
138 elif spec in _bundlespeccgversions:
139 if spec == 'packed1':
139 if spec == 'packed1':
140 compression = 'none'
140 compression = 'none'
141 else:
141 else:
142 compression = 'bzip2'
142 compression = 'bzip2'
143 version = spec
143 version = spec
144 else:
144 else:
145 raise error.UnsupportedBundleSpecification(
145 raise error.UnsupportedBundleSpecification(
146 _('%s is not a recognized bundle specification') % spec)
146 _('%s is not a recognized bundle specification') % spec)
147
147
148 # The specification for packed1 can optionally declare the data formats
148 # The specification for packed1 can optionally declare the data formats
149 # required to apply it. If we see this metadata, compare against what the
149 # required to apply it. If we see this metadata, compare against what the
150 # repo supports and error if the bundle isn't compatible.
150 # repo supports and error if the bundle isn't compatible.
151 if version == 'packed1' and 'requirements' in params:
151 if version == 'packed1' and 'requirements' in params:
152 requirements = set(params['requirements'].split(','))
152 requirements = set(params['requirements'].split(','))
153 missingreqs = requirements - repo.supportedformats
153 missingreqs = requirements - repo.supportedformats
154 if missingreqs:
154 if missingreqs:
155 raise error.UnsupportedBundleSpecification(
155 raise error.UnsupportedBundleSpecification(
156 _('missing support for repository features: %s') %
156 _('missing support for repository features: %s') %
157 ', '.join(sorted(missingreqs)))
157 ', '.join(sorted(missingreqs)))
158
158
159 if not externalnames:
159 if not externalnames:
160 compression = _bundlespeccompressions[compression]
160 compression = _bundlespeccompressions[compression]
161 version = _bundlespeccgversions[version]
161 version = _bundlespeccgversions[version]
162 return compression, version, params
162 return compression, version, params
163
163
164 def readbundle(ui, fh, fname, vfs=None):
164 def readbundle(ui, fh, fname, vfs=None):
165 header = changegroup.readexactly(fh, 4)
165 header = changegroup.readexactly(fh, 4)
166
166
167 alg = None
167 alg = None
168 if not fname:
168 if not fname:
169 fname = "stream"
169 fname = "stream"
170 if not header.startswith('HG') and header.startswith('\0'):
170 if not header.startswith('HG') and header.startswith('\0'):
171 fh = changegroup.headerlessfixup(fh, header)
171 fh = changegroup.headerlessfixup(fh, header)
172 header = "HG10"
172 header = "HG10"
173 alg = 'UN'
173 alg = 'UN'
174 elif vfs:
174 elif vfs:
175 fname = vfs.join(fname)
175 fname = vfs.join(fname)
176
176
177 magic, version = header[0:2], header[2:4]
177 magic, version = header[0:2], header[2:4]
178
178
179 if magic != 'HG':
179 if magic != 'HG':
180 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
180 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
181 if version == '10':
181 if version == '10':
182 if alg is None:
182 if alg is None:
183 alg = changegroup.readexactly(fh, 2)
183 alg = changegroup.readexactly(fh, 2)
184 return changegroup.cg1unpacker(fh, alg)
184 return changegroup.cg1unpacker(fh, alg)
185 elif version.startswith('2'):
185 elif version.startswith('2'):
186 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
186 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
187 elif version == 'S1':
187 elif version == 'S1':
188 return streamclone.streamcloneapplier(fh)
188 return streamclone.streamcloneapplier(fh)
189 else:
189 else:
190 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
190 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
191
191
192 def getbundlespec(ui, fh):
192 def getbundlespec(ui, fh):
193 """Infer the bundlespec from a bundle file handle.
193 """Infer the bundlespec from a bundle file handle.
194
194
195 The input file handle is seeked and the original seek position is not
195 The input file handle is seeked and the original seek position is not
196 restored.
196 restored.
197 """
197 """
198 def speccompression(alg):
198 def speccompression(alg):
199 for k, v in _bundlespeccompressions.items():
199 for k, v in _bundlespeccompressions.items():
200 if v == alg:
200 if v == alg:
201 return k
201 return k
202 return None
202 return None
203
203
204 b = readbundle(ui, fh, None)
204 b = readbundle(ui, fh, None)
205 if isinstance(b, changegroup.cg1unpacker):
205 if isinstance(b, changegroup.cg1unpacker):
206 alg = b._type
206 alg = b._type
207 if alg == '_truncatedBZ':
207 if alg == '_truncatedBZ':
208 alg = 'BZ'
208 alg = 'BZ'
209 comp = speccompression(alg)
209 comp = speccompression(alg)
210 if not comp:
210 if not comp:
211 raise error.Abort(_('unknown compression algorithm: %s') % alg)
211 raise error.Abort(_('unknown compression algorithm: %s') % alg)
212 return '%s-v1' % comp
212 return '%s-v1' % comp
213 elif isinstance(b, bundle2.unbundle20):
213 elif isinstance(b, bundle2.unbundle20):
214 if 'Compression' in b.params:
214 if 'Compression' in b.params:
215 comp = speccompression(b.params['Compression'])
215 comp = speccompression(b.params['Compression'])
216 if not comp:
216 if not comp:
217 raise error.Abort(_('unknown compression algorithm: %s') % comp)
217 raise error.Abort(_('unknown compression algorithm: %s') % comp)
218 else:
218 else:
219 comp = 'none'
219 comp = 'none'
220
220
221 version = None
221 version = None
222 for part in b.iterparts():
222 for part in b.iterparts():
223 if part.type == 'changegroup':
223 if part.type == 'changegroup':
224 version = part.params['version']
224 version = part.params['version']
225 if version in ('01', '02'):
225 if version in ('01', '02'):
226 version = 'v2'
226 version = 'v2'
227 else:
227 else:
228 raise error.Abort(_('changegroup version %s does not have '
228 raise error.Abort(_('changegroup version %s does not have '
229 'a known bundlespec') % version,
229 'a known bundlespec') % version,
230 hint=_('try upgrading your Mercurial '
230 hint=_('try upgrading your Mercurial '
231 'client'))
231 'client'))
232
232
233 if not version:
233 if not version:
234 raise error.Abort(_('could not identify changegroup version in '
234 raise error.Abort(_('could not identify changegroup version in '
235 'bundle'))
235 'bundle'))
236
236
237 return '%s-%s' % (comp, version)
237 return '%s-%s' % (comp, version)
238 elif isinstance(b, streamclone.streamcloneapplier):
238 elif isinstance(b, streamclone.streamcloneapplier):
239 requirements = streamclone.readbundle1header(fh)[2]
239 requirements = streamclone.readbundle1header(fh)[2]
240 params = 'requirements=%s' % ','.join(sorted(requirements))
240 params = 'requirements=%s' % ','.join(sorted(requirements))
241 return 'none-packed1;%s' % urlreq.quote(params)
241 return 'none-packed1;%s' % urlreq.quote(params)
242 else:
242 else:
243 raise error.Abort(_('unknown bundle type: %s') % b)
243 raise error.Abort(_('unknown bundle type: %s') % b)
244
244
245 def buildobsmarkerspart(bundler, markers):
245 def buildobsmarkerspart(bundler, markers):
246 """add an obsmarker part to the bundler with <markers>
246 """add an obsmarker part to the bundler with <markers>
247
247
248 No part is created if markers is empty.
248 No part is created if markers is empty.
249 Raises ValueError if the bundler doesn't support any known obsmarker format.
249 Raises ValueError if the bundler doesn't support any known obsmarker format.
250 """
250 """
251 if markers:
251 if markers:
252 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
252 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
253 version = obsolete.commonversion(remoteversions)
253 version = obsolete.commonversion(remoteversions)
254 if version is None:
254 if version is None:
255 raise ValueError('bundler does not support common obsmarker format')
255 raise ValueError('bundler does not support common obsmarker format')
256 stream = obsolete.encodemarkers(markers, True, version=version)
256 stream = obsolete.encodemarkers(markers, True, version=version)
257 return bundler.newpart('obsmarkers', data=stream)
257 return bundler.newpart('obsmarkers', data=stream)
258 return None
258 return None
259
259
260 def _canusebundle2(op):
260 def _forcebundle1(op):
261 """return true if a pull/push can use bundle2
261 """return true if a pull/push must use bundle1
262
262
263 Feel free to nuke this function when we drop the experimental option"""
263 Feel free to nuke this function when we drop the experimental option"""
264 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
264 return not (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
265 and op.remote.capable('bundle2'))
265 and op.remote.capable('bundle2'))
266
266
267
267
268 class pushoperation(object):
268 class pushoperation(object):
269 """A object that represent a single push operation
269 """A object that represent a single push operation
270
270
271 Its purpose is to carry push related state and very common operations.
271 Its purpose is to carry push related state and very common operations.
272
272
273 A new pushoperation should be created at the beginning of each push and
273 A new pushoperation should be created at the beginning of each push and
274 discarded afterward.
274 discarded afterward.
275 """
275 """
276
276
277 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
277 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
278 bookmarks=()):
278 bookmarks=()):
279 # repo we push from
279 # repo we push from
280 self.repo = repo
280 self.repo = repo
281 self.ui = repo.ui
281 self.ui = repo.ui
282 # repo we push to
282 # repo we push to
283 self.remote = remote
283 self.remote = remote
284 # force option provided
284 # force option provided
285 self.force = force
285 self.force = force
286 # revs to be pushed (None is "all")
286 # revs to be pushed (None is "all")
287 self.revs = revs
287 self.revs = revs
288 # bookmark explicitly pushed
288 # bookmark explicitly pushed
289 self.bookmarks = bookmarks
289 self.bookmarks = bookmarks
290 # allow push of new branch
290 # allow push of new branch
291 self.newbranch = newbranch
291 self.newbranch = newbranch
292 # did a local lock get acquired?
292 # did a local lock get acquired?
293 self.locallocked = None
293 self.locallocked = None
294 # step already performed
294 # step already performed
295 # (used to check what steps have been already performed through bundle2)
295 # (used to check what steps have been already performed through bundle2)
296 self.stepsdone = set()
296 self.stepsdone = set()
297 # Integer version of the changegroup push result
297 # Integer version of the changegroup push result
298 # - None means nothing to push
298 # - None means nothing to push
299 # - 0 means HTTP error
299 # - 0 means HTTP error
300 # - 1 means we pushed and remote head count is unchanged *or*
300 # - 1 means we pushed and remote head count is unchanged *or*
301 # we have outgoing changesets but refused to push
301 # we have outgoing changesets but refused to push
302 # - other values as described by addchangegroup()
302 # - other values as described by addchangegroup()
303 self.cgresult = None
303 self.cgresult = None
304 # Boolean value for the bookmark push
304 # Boolean value for the bookmark push
305 self.bkresult = None
305 self.bkresult = None
306 # discover.outgoing object (contains common and outgoing data)
306 # discover.outgoing object (contains common and outgoing data)
307 self.outgoing = None
307 self.outgoing = None
308 # all remote heads before the push
308 # all remote heads before the push
309 self.remoteheads = None
309 self.remoteheads = None
310 # testable as a boolean indicating if any nodes are missing locally.
310 # testable as a boolean indicating if any nodes are missing locally.
311 self.incoming = None
311 self.incoming = None
312 # phases changes that must be pushed along side the changesets
312 # phases changes that must be pushed along side the changesets
313 self.outdatedphases = None
313 self.outdatedphases = None
314 # phases changes that must be pushed if changeset push fails
314 # phases changes that must be pushed if changeset push fails
315 self.fallbackoutdatedphases = None
315 self.fallbackoutdatedphases = None
316 # outgoing obsmarkers
316 # outgoing obsmarkers
317 self.outobsmarkers = set()
317 self.outobsmarkers = set()
318 # outgoing bookmarks
318 # outgoing bookmarks
319 self.outbookmarks = []
319 self.outbookmarks = []
320 # transaction manager
320 # transaction manager
321 self.trmanager = None
321 self.trmanager = None
322 # map { pushkey partid -> callback handling failure}
322 # map { pushkey partid -> callback handling failure}
323 # used to handle exception from mandatory pushkey part failure
323 # used to handle exception from mandatory pushkey part failure
324 self.pkfailcb = {}
324 self.pkfailcb = {}
325
325
326 @util.propertycache
326 @util.propertycache
327 def futureheads(self):
327 def futureheads(self):
328 """future remote heads if the changeset push succeeds"""
328 """future remote heads if the changeset push succeeds"""
329 return self.outgoing.missingheads
329 return self.outgoing.missingheads
330
330
331 @util.propertycache
331 @util.propertycache
332 def fallbackheads(self):
332 def fallbackheads(self):
333 """future remote heads if the changeset push fails"""
333 """future remote heads if the changeset push fails"""
334 if self.revs is None:
334 if self.revs is None:
335 # not target to push, all common are relevant
335 # not target to push, all common are relevant
336 return self.outgoing.commonheads
336 return self.outgoing.commonheads
337 unfi = self.repo.unfiltered()
337 unfi = self.repo.unfiltered()
338 # I want cheads = heads(::missingheads and ::commonheads)
338 # I want cheads = heads(::missingheads and ::commonheads)
339 # (missingheads is revs with secret changeset filtered out)
339 # (missingheads is revs with secret changeset filtered out)
340 #
340 #
341 # This can be expressed as:
341 # This can be expressed as:
342 # cheads = ( (missingheads and ::commonheads)
342 # cheads = ( (missingheads and ::commonheads)
343 # + (commonheads and ::missingheads))"
343 # + (commonheads and ::missingheads))"
344 # )
344 # )
345 #
345 #
346 # while trying to push we already computed the following:
346 # while trying to push we already computed the following:
347 # common = (::commonheads)
347 # common = (::commonheads)
348 # missing = ((commonheads::missingheads) - commonheads)
348 # missing = ((commonheads::missingheads) - commonheads)
349 #
349 #
350 # We can pick:
350 # We can pick:
351 # * missingheads part of common (::commonheads)
351 # * missingheads part of common (::commonheads)
352 common = self.outgoing.common
352 common = self.outgoing.common
353 nm = self.repo.changelog.nodemap
353 nm = self.repo.changelog.nodemap
354 cheads = [node for node in self.revs if nm[node] in common]
354 cheads = [node for node in self.revs if nm[node] in common]
355 # and
355 # and
356 # * commonheads parents on missing
356 # * commonheads parents on missing
357 revset = unfi.set('%ln and parents(roots(%ln))',
357 revset = unfi.set('%ln and parents(roots(%ln))',
358 self.outgoing.commonheads,
358 self.outgoing.commonheads,
359 self.outgoing.missing)
359 self.outgoing.missing)
360 cheads.extend(c.node() for c in revset)
360 cheads.extend(c.node() for c in revset)
361 return cheads
361 return cheads
362
362
363 @property
363 @property
364 def commonheads(self):
364 def commonheads(self):
365 """set of all common heads after changeset bundle push"""
365 """set of all common heads after changeset bundle push"""
366 if self.cgresult:
366 if self.cgresult:
367 return self.futureheads
367 return self.futureheads
368 else:
368 else:
369 return self.fallbackheads
369 return self.fallbackheads
370
370
371 # mapping of message used when pushing bookmark
371 # mapping of message used when pushing bookmark
372 bookmsgmap = {'update': (_("updating bookmark %s\n"),
372 bookmsgmap = {'update': (_("updating bookmark %s\n"),
373 _('updating bookmark %s failed!\n')),
373 _('updating bookmark %s failed!\n')),
374 'export': (_("exporting bookmark %s\n"),
374 'export': (_("exporting bookmark %s\n"),
375 _('exporting bookmark %s failed!\n')),
375 _('exporting bookmark %s failed!\n')),
376 'delete': (_("deleting remote bookmark %s\n"),
376 'delete': (_("deleting remote bookmark %s\n"),
377 _('deleting remote bookmark %s failed!\n')),
377 _('deleting remote bookmark %s failed!\n')),
378 }
378 }
379
379
380
380
381 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
381 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
382 opargs=None):
382 opargs=None):
383 '''Push outgoing changesets (limited by revs) from a local
383 '''Push outgoing changesets (limited by revs) from a local
384 repository to remote. Return an integer:
384 repository to remote. Return an integer:
385 - None means nothing to push
385 - None means nothing to push
386 - 0 means HTTP error
386 - 0 means HTTP error
387 - 1 means we pushed and remote head count is unchanged *or*
387 - 1 means we pushed and remote head count is unchanged *or*
388 we have outgoing changesets but refused to push
388 we have outgoing changesets but refused to push
389 - other values as described by addchangegroup()
389 - other values as described by addchangegroup()
390 '''
390 '''
391 if opargs is None:
391 if opargs is None:
392 opargs = {}
392 opargs = {}
393 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
393 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
394 **opargs)
394 **opargs)
395 if pushop.remote.local():
395 if pushop.remote.local():
396 missing = (set(pushop.repo.requirements)
396 missing = (set(pushop.repo.requirements)
397 - pushop.remote.local().supported)
397 - pushop.remote.local().supported)
398 if missing:
398 if missing:
399 msg = _("required features are not"
399 msg = _("required features are not"
400 " supported in the destination:"
400 " supported in the destination:"
401 " %s") % (', '.join(sorted(missing)))
401 " %s") % (', '.join(sorted(missing)))
402 raise error.Abort(msg)
402 raise error.Abort(msg)
403
403
404 # there are two ways to push to remote repo:
404 # there are two ways to push to remote repo:
405 #
405 #
406 # addchangegroup assumes local user can lock remote
406 # addchangegroup assumes local user can lock remote
407 # repo (local filesystem, old ssh servers).
407 # repo (local filesystem, old ssh servers).
408 #
408 #
409 # unbundle assumes local user cannot lock remote repo (new ssh
409 # unbundle assumes local user cannot lock remote repo (new ssh
410 # servers, http servers).
410 # servers, http servers).
411
411
412 if not pushop.remote.canpush():
412 if not pushop.remote.canpush():
413 raise error.Abort(_("destination does not support push"))
413 raise error.Abort(_("destination does not support push"))
414 # get local lock as we might write phase data
414 # get local lock as we might write phase data
415 localwlock = locallock = None
415 localwlock = locallock = None
416 try:
416 try:
417 # bundle2 push may receive a reply bundle touching bookmarks or other
417 # bundle2 push may receive a reply bundle touching bookmarks or other
418 # things requiring the wlock. Take it now to ensure proper ordering.
418 # things requiring the wlock. Take it now to ensure proper ordering.
419 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
419 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
420 if _canusebundle2(pushop) and maypushback:
420 if (not _forcebundle1(pushop)) and maypushback:
421 localwlock = pushop.repo.wlock()
421 localwlock = pushop.repo.wlock()
422 locallock = pushop.repo.lock()
422 locallock = pushop.repo.lock()
423 pushop.locallocked = True
423 pushop.locallocked = True
424 except IOError as err:
424 except IOError as err:
425 pushop.locallocked = False
425 pushop.locallocked = False
426 if err.errno != errno.EACCES:
426 if err.errno != errno.EACCES:
427 raise
427 raise
428 # source repo cannot be locked.
428 # source repo cannot be locked.
429 # We do not abort the push, but just disable the local phase
429 # We do not abort the push, but just disable the local phase
430 # synchronisation.
430 # synchronisation.
431 msg = 'cannot lock source repository: %s\n' % err
431 msg = 'cannot lock source repository: %s\n' % err
432 pushop.ui.debug(msg)
432 pushop.ui.debug(msg)
433 try:
433 try:
434 if pushop.locallocked:
434 if pushop.locallocked:
435 pushop.trmanager = transactionmanager(pushop.repo,
435 pushop.trmanager = transactionmanager(pushop.repo,
436 'push-response',
436 'push-response',
437 pushop.remote.url())
437 pushop.remote.url())
438 pushop.repo.checkpush(pushop)
438 pushop.repo.checkpush(pushop)
439 lock = None
439 lock = None
440 unbundle = pushop.remote.capable('unbundle')
440 unbundle = pushop.remote.capable('unbundle')
441 if not unbundle:
441 if not unbundle:
442 lock = pushop.remote.lock()
442 lock = pushop.remote.lock()
443 try:
443 try:
444 _pushdiscovery(pushop)
444 _pushdiscovery(pushop)
445 if _canusebundle2(pushop):
445 if not _forcebundle1(pushop):
446 _pushbundle2(pushop)
446 _pushbundle2(pushop)
447 _pushchangeset(pushop)
447 _pushchangeset(pushop)
448 _pushsyncphase(pushop)
448 _pushsyncphase(pushop)
449 _pushobsolete(pushop)
449 _pushobsolete(pushop)
450 _pushbookmark(pushop)
450 _pushbookmark(pushop)
451 finally:
451 finally:
452 if lock is not None:
452 if lock is not None:
453 lock.release()
453 lock.release()
454 if pushop.trmanager:
454 if pushop.trmanager:
455 pushop.trmanager.close()
455 pushop.trmanager.close()
456 finally:
456 finally:
457 if pushop.trmanager:
457 if pushop.trmanager:
458 pushop.trmanager.release()
458 pushop.trmanager.release()
459 if locallock is not None:
459 if locallock is not None:
460 locallock.release()
460 locallock.release()
461 if localwlock is not None:
461 if localwlock is not None:
462 localwlock.release()
462 localwlock.release()
463
463
464 return pushop
464 return pushop
465
465
466 # list of steps to perform discovery before push
466 # list of steps to perform discovery before push
467 pushdiscoveryorder = []
467 pushdiscoveryorder = []
468
468
469 # Mapping between step name and function
469 # Mapping between step name and function
470 #
470 #
471 # This exists to help extensions wrap steps if necessary
471 # This exists to help extensions wrap steps if necessary
472 pushdiscoverymapping = {}
472 pushdiscoverymapping = {}
473
473
474 def pushdiscovery(stepname):
474 def pushdiscovery(stepname):
475 """decorator for function performing discovery before push
475 """decorator for function performing discovery before push
476
476
477 The function is added to the step -> function mapping and appended to the
477 The function is added to the step -> function mapping and appended to the
478 list of steps. Beware that decorated function will be added in order (this
478 list of steps. Beware that decorated function will be added in order (this
479 may matter).
479 may matter).
480
480
481 You can only use this decorator for a new step, if you want to wrap a step
481 You can only use this decorator for a new step, if you want to wrap a step
482 from an extension, change the pushdiscovery dictionary directly."""
482 from an extension, change the pushdiscovery dictionary directly."""
483 def dec(func):
483 def dec(func):
484 assert stepname not in pushdiscoverymapping
484 assert stepname not in pushdiscoverymapping
485 pushdiscoverymapping[stepname] = func
485 pushdiscoverymapping[stepname] = func
486 pushdiscoveryorder.append(stepname)
486 pushdiscoveryorder.append(stepname)
487 return func
487 return func
488 return dec
488 return dec
489
489
490 def _pushdiscovery(pushop):
490 def _pushdiscovery(pushop):
491 """Run all discovery steps"""
491 """Run all discovery steps"""
492 for stepname in pushdiscoveryorder:
492 for stepname in pushdiscoveryorder:
493 step = pushdiscoverymapping[stepname]
493 step = pushdiscoverymapping[stepname]
494 step(pushop)
494 step(pushop)
495
495
496 @pushdiscovery('changeset')
496 @pushdiscovery('changeset')
497 def _pushdiscoverychangeset(pushop):
497 def _pushdiscoverychangeset(pushop):
498 """discover the changeset that need to be pushed"""
498 """discover the changeset that need to be pushed"""
499 fci = discovery.findcommonincoming
499 fci = discovery.findcommonincoming
500 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
500 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
501 common, inc, remoteheads = commoninc
501 common, inc, remoteheads = commoninc
502 fco = discovery.findcommonoutgoing
502 fco = discovery.findcommonoutgoing
503 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
503 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
504 commoninc=commoninc, force=pushop.force)
504 commoninc=commoninc, force=pushop.force)
505 pushop.outgoing = outgoing
505 pushop.outgoing = outgoing
506 pushop.remoteheads = remoteheads
506 pushop.remoteheads = remoteheads
507 pushop.incoming = inc
507 pushop.incoming = inc
508
508
509 @pushdiscovery('phase')
509 @pushdiscovery('phase')
510 def _pushdiscoveryphase(pushop):
510 def _pushdiscoveryphase(pushop):
511 """discover the phase that needs to be pushed
511 """discover the phase that needs to be pushed
512
512
513 (computed for both success and failure case for changesets push)"""
513 (computed for both success and failure case for changesets push)"""
514 outgoing = pushop.outgoing
514 outgoing = pushop.outgoing
515 unfi = pushop.repo.unfiltered()
515 unfi = pushop.repo.unfiltered()
516 remotephases = pushop.remote.listkeys('phases')
516 remotephases = pushop.remote.listkeys('phases')
517 publishing = remotephases.get('publishing', False)
517 publishing = remotephases.get('publishing', False)
518 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
518 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
519 and remotephases # server supports phases
519 and remotephases # server supports phases
520 and not pushop.outgoing.missing # no changesets to be pushed
520 and not pushop.outgoing.missing # no changesets to be pushed
521 and publishing):
521 and publishing):
522 # When:
522 # When:
523 # - this is a subrepo push
523 # - this is a subrepo push
524 # - and remote support phase
524 # - and remote support phase
525 # - and no changeset are to be pushed
525 # - and no changeset are to be pushed
526 # - and remote is publishing
526 # - and remote is publishing
527 # We may be in issue 3871 case!
527 # We may be in issue 3871 case!
528 # We drop the possible phase synchronisation done by
528 # We drop the possible phase synchronisation done by
529 # courtesy to publish changesets possibly locally draft
529 # courtesy to publish changesets possibly locally draft
530 # on the remote.
530 # on the remote.
531 remotephases = {'publishing': 'True'}
531 remotephases = {'publishing': 'True'}
532 ana = phases.analyzeremotephases(pushop.repo,
532 ana = phases.analyzeremotephases(pushop.repo,
533 pushop.fallbackheads,
533 pushop.fallbackheads,
534 remotephases)
534 remotephases)
535 pheads, droots = ana
535 pheads, droots = ana
536 extracond = ''
536 extracond = ''
537 if not publishing:
537 if not publishing:
538 extracond = ' and public()'
538 extracond = ' and public()'
539 revset = 'heads((%%ln::%%ln) %s)' % extracond
539 revset = 'heads((%%ln::%%ln) %s)' % extracond
540 # Get the list of all revs draft on remote by public here.
540 # Get the list of all revs draft on remote by public here.
541 # XXX Beware that revset break if droots is not strictly
541 # XXX Beware that revset break if droots is not strictly
542 # XXX root we may want to ensure it is but it is costly
542 # XXX root we may want to ensure it is but it is costly
543 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
543 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
544 if not outgoing.missing:
544 if not outgoing.missing:
545 future = fallback
545 future = fallback
546 else:
546 else:
547 # adds changeset we are going to push as draft
547 # adds changeset we are going to push as draft
548 #
548 #
549 # should not be necessary for publishing server, but because of an
549 # should not be necessary for publishing server, but because of an
550 # issue fixed in xxxxx we have to do it anyway.
550 # issue fixed in xxxxx we have to do it anyway.
551 fdroots = list(unfi.set('roots(%ln + %ln::)',
551 fdroots = list(unfi.set('roots(%ln + %ln::)',
552 outgoing.missing, droots))
552 outgoing.missing, droots))
553 fdroots = [f.node() for f in fdroots]
553 fdroots = [f.node() for f in fdroots]
554 future = list(unfi.set(revset, fdroots, pushop.futureheads))
554 future = list(unfi.set(revset, fdroots, pushop.futureheads))
555 pushop.outdatedphases = future
555 pushop.outdatedphases = future
556 pushop.fallbackoutdatedphases = fallback
556 pushop.fallbackoutdatedphases = fallback
557
557
558 @pushdiscovery('obsmarker')
558 @pushdiscovery('obsmarker')
559 def _pushdiscoveryobsmarkers(pushop):
559 def _pushdiscoveryobsmarkers(pushop):
560 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
560 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
561 and pushop.repo.obsstore
561 and pushop.repo.obsstore
562 and 'obsolete' in pushop.remote.listkeys('namespaces')):
562 and 'obsolete' in pushop.remote.listkeys('namespaces')):
563 repo = pushop.repo
563 repo = pushop.repo
564 # very naive computation, that can be quite expensive on big repo.
564 # very naive computation, that can be quite expensive on big repo.
565 # However: evolution is currently slow on them anyway.
565 # However: evolution is currently slow on them anyway.
566 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
566 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
567 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
567 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
568
568
569 @pushdiscovery('bookmarks')
569 @pushdiscovery('bookmarks')
570 def _pushdiscoverybookmarks(pushop):
570 def _pushdiscoverybookmarks(pushop):
571 ui = pushop.ui
571 ui = pushop.ui
572 repo = pushop.repo.unfiltered()
572 repo = pushop.repo.unfiltered()
573 remote = pushop.remote
573 remote = pushop.remote
574 ui.debug("checking for updated bookmarks\n")
574 ui.debug("checking for updated bookmarks\n")
575 ancestors = ()
575 ancestors = ()
576 if pushop.revs:
576 if pushop.revs:
577 revnums = map(repo.changelog.rev, pushop.revs)
577 revnums = map(repo.changelog.rev, pushop.revs)
578 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
578 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
579 remotebookmark = remote.listkeys('bookmarks')
579 remotebookmark = remote.listkeys('bookmarks')
580
580
581 explicit = set([repo._bookmarks.expandname(bookmark)
581 explicit = set([repo._bookmarks.expandname(bookmark)
582 for bookmark in pushop.bookmarks])
582 for bookmark in pushop.bookmarks])
583
583
584 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
584 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
585 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
585 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
586 for b, scid, dcid in advsrc:
586 for b, scid, dcid in advsrc:
587 if b in explicit:
587 if b in explicit:
588 explicit.remove(b)
588 explicit.remove(b)
589 if not ancestors or repo[scid].rev() in ancestors:
589 if not ancestors or repo[scid].rev() in ancestors:
590 pushop.outbookmarks.append((b, dcid, scid))
590 pushop.outbookmarks.append((b, dcid, scid))
591 # search added bookmark
591 # search added bookmark
592 for b, scid, dcid in addsrc:
592 for b, scid, dcid in addsrc:
593 if b in explicit:
593 if b in explicit:
594 explicit.remove(b)
594 explicit.remove(b)
595 pushop.outbookmarks.append((b, '', scid))
595 pushop.outbookmarks.append((b, '', scid))
596 # search for overwritten bookmark
596 # search for overwritten bookmark
597 for b, scid, dcid in advdst + diverge + differ:
597 for b, scid, dcid in advdst + diverge + differ:
598 if b in explicit:
598 if b in explicit:
599 explicit.remove(b)
599 explicit.remove(b)
600 pushop.outbookmarks.append((b, dcid, scid))
600 pushop.outbookmarks.append((b, dcid, scid))
601 # search for bookmark to delete
601 # search for bookmark to delete
602 for b, scid, dcid in adddst:
602 for b, scid, dcid in adddst:
603 if b in explicit:
603 if b in explicit:
604 explicit.remove(b)
604 explicit.remove(b)
605 # treat as "deleted locally"
605 # treat as "deleted locally"
606 pushop.outbookmarks.append((b, dcid, ''))
606 pushop.outbookmarks.append((b, dcid, ''))
607 # identical bookmarks shouldn't get reported
607 # identical bookmarks shouldn't get reported
608 for b, scid, dcid in same:
608 for b, scid, dcid in same:
609 if b in explicit:
609 if b in explicit:
610 explicit.remove(b)
610 explicit.remove(b)
611
611
612 if explicit:
612 if explicit:
613 explicit = sorted(explicit)
613 explicit = sorted(explicit)
614 # we should probably list all of them
614 # we should probably list all of them
615 ui.warn(_('bookmark %s does not exist on the local '
615 ui.warn(_('bookmark %s does not exist on the local '
616 'or remote repository!\n') % explicit[0])
616 'or remote repository!\n') % explicit[0])
617 pushop.bkresult = 2
617 pushop.bkresult = 2
618
618
619 pushop.outbookmarks.sort()
619 pushop.outbookmarks.sort()
620
620
621 def _pushcheckoutgoing(pushop):
621 def _pushcheckoutgoing(pushop):
622 outgoing = pushop.outgoing
622 outgoing = pushop.outgoing
623 unfi = pushop.repo.unfiltered()
623 unfi = pushop.repo.unfiltered()
624 if not outgoing.missing:
624 if not outgoing.missing:
625 # nothing to push
625 # nothing to push
626 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
626 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
627 return False
627 return False
628 # something to push
628 # something to push
629 if not pushop.force:
629 if not pushop.force:
630 # if repo.obsstore == False --> no obsolete
630 # if repo.obsstore == False --> no obsolete
631 # then, save the iteration
631 # then, save the iteration
632 if unfi.obsstore:
632 if unfi.obsstore:
633 # this message are here for 80 char limit reason
633 # this message are here for 80 char limit reason
634 mso = _("push includes obsolete changeset: %s!")
634 mso = _("push includes obsolete changeset: %s!")
635 mst = {"unstable": _("push includes unstable changeset: %s!"),
635 mst = {"unstable": _("push includes unstable changeset: %s!"),
636 "bumped": _("push includes bumped changeset: %s!"),
636 "bumped": _("push includes bumped changeset: %s!"),
637 "divergent": _("push includes divergent changeset: %s!")}
637 "divergent": _("push includes divergent changeset: %s!")}
638 # If we are to push if there is at least one
638 # If we are to push if there is at least one
639 # obsolete or unstable changeset in missing, at
639 # obsolete or unstable changeset in missing, at
640 # least one of the missinghead will be obsolete or
640 # least one of the missinghead will be obsolete or
641 # unstable. So checking heads only is ok
641 # unstable. So checking heads only is ok
642 for node in outgoing.missingheads:
642 for node in outgoing.missingheads:
643 ctx = unfi[node]
643 ctx = unfi[node]
644 if ctx.obsolete():
644 if ctx.obsolete():
645 raise error.Abort(mso % ctx)
645 raise error.Abort(mso % ctx)
646 elif ctx.troubled():
646 elif ctx.troubled():
647 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
647 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
648
648
649 discovery.checkheads(pushop)
649 discovery.checkheads(pushop)
650 return True
650 return True
651
651
652 # List of names of steps to perform for an outgoing bundle2, order matters.
652 # List of names of steps to perform for an outgoing bundle2, order matters.
653 b2partsgenorder = []
653 b2partsgenorder = []
654
654
655 # Mapping between step name and function
655 # Mapping between step name and function
656 #
656 #
657 # This exists to help extensions wrap steps if necessary
657 # This exists to help extensions wrap steps if necessary
658 b2partsgenmapping = {}
658 b2partsgenmapping = {}
659
659
660 def b2partsgenerator(stepname, idx=None):
660 def b2partsgenerator(stepname, idx=None):
661 """decorator for function generating bundle2 part
661 """decorator for function generating bundle2 part
662
662
663 The function is added to the step -> function mapping and appended to the
663 The function is added to the step -> function mapping and appended to the
664 list of steps. Beware that decorated functions will be added in order
664 list of steps. Beware that decorated functions will be added in order
665 (this may matter).
665 (this may matter).
666
666
667 You can only use this decorator for new steps, if you want to wrap a step
667 You can only use this decorator for new steps, if you want to wrap a step
668 from an extension, attack the b2partsgenmapping dictionary directly."""
668 from an extension, attack the b2partsgenmapping dictionary directly."""
669 def dec(func):
669 def dec(func):
670 assert stepname not in b2partsgenmapping
670 assert stepname not in b2partsgenmapping
671 b2partsgenmapping[stepname] = func
671 b2partsgenmapping[stepname] = func
672 if idx is None:
672 if idx is None:
673 b2partsgenorder.append(stepname)
673 b2partsgenorder.append(stepname)
674 else:
674 else:
675 b2partsgenorder.insert(idx, stepname)
675 b2partsgenorder.insert(idx, stepname)
676 return func
676 return func
677 return dec
677 return dec
678
678
679 def _pushb2ctxcheckheads(pushop, bundler):
679 def _pushb2ctxcheckheads(pushop, bundler):
680 """Generate race condition checking parts
680 """Generate race condition checking parts
681
681
682 Exists as an independent function to aid extensions
682 Exists as an independent function to aid extensions
683 """
683 """
684 if not pushop.force:
684 if not pushop.force:
685 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
685 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
686
686
687 @b2partsgenerator('changeset')
687 @b2partsgenerator('changeset')
688 def _pushb2ctx(pushop, bundler):
688 def _pushb2ctx(pushop, bundler):
689 """handle changegroup push through bundle2
689 """handle changegroup push through bundle2
690
690
691 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
691 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
692 """
692 """
693 if 'changesets' in pushop.stepsdone:
693 if 'changesets' in pushop.stepsdone:
694 return
694 return
695 pushop.stepsdone.add('changesets')
695 pushop.stepsdone.add('changesets')
696 # Send known heads to the server for race detection.
696 # Send known heads to the server for race detection.
697 if not _pushcheckoutgoing(pushop):
697 if not _pushcheckoutgoing(pushop):
698 return
698 return
699 pushop.repo.prepushoutgoinghooks(pushop)
699 pushop.repo.prepushoutgoinghooks(pushop)
700
700
701 _pushb2ctxcheckheads(pushop, bundler)
701 _pushb2ctxcheckheads(pushop, bundler)
702
702
703 b2caps = bundle2.bundle2caps(pushop.remote)
703 b2caps = bundle2.bundle2caps(pushop.remote)
704 version = '01'
704 version = '01'
705 cgversions = b2caps.get('changegroup')
705 cgversions = b2caps.get('changegroup')
706 if cgversions: # 3.1 and 3.2 ship with an empty value
706 if cgversions: # 3.1 and 3.2 ship with an empty value
707 cgversions = [v for v in cgversions
707 cgversions = [v for v in cgversions
708 if v in changegroup.supportedoutgoingversions(
708 if v in changegroup.supportedoutgoingversions(
709 pushop.repo)]
709 pushop.repo)]
710 if not cgversions:
710 if not cgversions:
711 raise ValueError(_('no common changegroup version'))
711 raise ValueError(_('no common changegroup version'))
712 version = max(cgversions)
712 version = max(cgversions)
713 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
713 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
714 pushop.outgoing,
714 pushop.outgoing,
715 version=version)
715 version=version)
716 cgpart = bundler.newpart('changegroup', data=cg)
716 cgpart = bundler.newpart('changegroup', data=cg)
717 if cgversions:
717 if cgversions:
718 cgpart.addparam('version', version)
718 cgpart.addparam('version', version)
719 if 'treemanifest' in pushop.repo.requirements:
719 if 'treemanifest' in pushop.repo.requirements:
720 cgpart.addparam('treemanifest', '1')
720 cgpart.addparam('treemanifest', '1')
721 def handlereply(op):
721 def handlereply(op):
722 """extract addchangegroup returns from server reply"""
722 """extract addchangegroup returns from server reply"""
723 cgreplies = op.records.getreplies(cgpart.id)
723 cgreplies = op.records.getreplies(cgpart.id)
724 assert len(cgreplies['changegroup']) == 1
724 assert len(cgreplies['changegroup']) == 1
725 pushop.cgresult = cgreplies['changegroup'][0]['return']
725 pushop.cgresult = cgreplies['changegroup'][0]['return']
726 return handlereply
726 return handlereply
727
727
728 @b2partsgenerator('phase')
728 @b2partsgenerator('phase')
729 def _pushb2phases(pushop, bundler):
729 def _pushb2phases(pushop, bundler):
730 """handle phase push through bundle2"""
730 """handle phase push through bundle2"""
731 if 'phases' in pushop.stepsdone:
731 if 'phases' in pushop.stepsdone:
732 return
732 return
733 b2caps = bundle2.bundle2caps(pushop.remote)
733 b2caps = bundle2.bundle2caps(pushop.remote)
734 if not 'pushkey' in b2caps:
734 if not 'pushkey' in b2caps:
735 return
735 return
736 pushop.stepsdone.add('phases')
736 pushop.stepsdone.add('phases')
737 part2node = []
737 part2node = []
738
738
739 def handlefailure(pushop, exc):
739 def handlefailure(pushop, exc):
740 targetid = int(exc.partid)
740 targetid = int(exc.partid)
741 for partid, node in part2node:
741 for partid, node in part2node:
742 if partid == targetid:
742 if partid == targetid:
743 raise error.Abort(_('updating %s to public failed') % node)
743 raise error.Abort(_('updating %s to public failed') % node)
744
744
745 enc = pushkey.encode
745 enc = pushkey.encode
746 for newremotehead in pushop.outdatedphases:
746 for newremotehead in pushop.outdatedphases:
747 part = bundler.newpart('pushkey')
747 part = bundler.newpart('pushkey')
748 part.addparam('namespace', enc('phases'))
748 part.addparam('namespace', enc('phases'))
749 part.addparam('key', enc(newremotehead.hex()))
749 part.addparam('key', enc(newremotehead.hex()))
750 part.addparam('old', enc(str(phases.draft)))
750 part.addparam('old', enc(str(phases.draft)))
751 part.addparam('new', enc(str(phases.public)))
751 part.addparam('new', enc(str(phases.public)))
752 part2node.append((part.id, newremotehead))
752 part2node.append((part.id, newremotehead))
753 pushop.pkfailcb[part.id] = handlefailure
753 pushop.pkfailcb[part.id] = handlefailure
754
754
755 def handlereply(op):
755 def handlereply(op):
756 for partid, node in part2node:
756 for partid, node in part2node:
757 partrep = op.records.getreplies(partid)
757 partrep = op.records.getreplies(partid)
758 results = partrep['pushkey']
758 results = partrep['pushkey']
759 assert len(results) <= 1
759 assert len(results) <= 1
760 msg = None
760 msg = None
761 if not results:
761 if not results:
762 msg = _('server ignored update of %s to public!\n') % node
762 msg = _('server ignored update of %s to public!\n') % node
763 elif not int(results[0]['return']):
763 elif not int(results[0]['return']):
764 msg = _('updating %s to public failed!\n') % node
764 msg = _('updating %s to public failed!\n') % node
765 if msg is not None:
765 if msg is not None:
766 pushop.ui.warn(msg)
766 pushop.ui.warn(msg)
767 return handlereply
767 return handlereply
768
768
769 @b2partsgenerator('obsmarkers')
769 @b2partsgenerator('obsmarkers')
770 def _pushb2obsmarkers(pushop, bundler):
770 def _pushb2obsmarkers(pushop, bundler):
771 if 'obsmarkers' in pushop.stepsdone:
771 if 'obsmarkers' in pushop.stepsdone:
772 return
772 return
773 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
773 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
774 if obsolete.commonversion(remoteversions) is None:
774 if obsolete.commonversion(remoteversions) is None:
775 return
775 return
776 pushop.stepsdone.add('obsmarkers')
776 pushop.stepsdone.add('obsmarkers')
777 if pushop.outobsmarkers:
777 if pushop.outobsmarkers:
778 markers = sorted(pushop.outobsmarkers)
778 markers = sorted(pushop.outobsmarkers)
779 buildobsmarkerspart(bundler, markers)
779 buildobsmarkerspart(bundler, markers)
780
780
781 @b2partsgenerator('bookmarks')
781 @b2partsgenerator('bookmarks')
782 def _pushb2bookmarks(pushop, bundler):
782 def _pushb2bookmarks(pushop, bundler):
783 """handle bookmark push through bundle2"""
783 """handle bookmark push through bundle2"""
784 if 'bookmarks' in pushop.stepsdone:
784 if 'bookmarks' in pushop.stepsdone:
785 return
785 return
786 b2caps = bundle2.bundle2caps(pushop.remote)
786 b2caps = bundle2.bundle2caps(pushop.remote)
787 if 'pushkey' not in b2caps:
787 if 'pushkey' not in b2caps:
788 return
788 return
789 pushop.stepsdone.add('bookmarks')
789 pushop.stepsdone.add('bookmarks')
790 part2book = []
790 part2book = []
791 enc = pushkey.encode
791 enc = pushkey.encode
792
792
793 def handlefailure(pushop, exc):
793 def handlefailure(pushop, exc):
794 targetid = int(exc.partid)
794 targetid = int(exc.partid)
795 for partid, book, action in part2book:
795 for partid, book, action in part2book:
796 if partid == targetid:
796 if partid == targetid:
797 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
797 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
798 # we should not be called for part we did not generated
798 # we should not be called for part we did not generated
799 assert False
799 assert False
800
800
801 for book, old, new in pushop.outbookmarks:
801 for book, old, new in pushop.outbookmarks:
802 part = bundler.newpart('pushkey')
802 part = bundler.newpart('pushkey')
803 part.addparam('namespace', enc('bookmarks'))
803 part.addparam('namespace', enc('bookmarks'))
804 part.addparam('key', enc(book))
804 part.addparam('key', enc(book))
805 part.addparam('old', enc(old))
805 part.addparam('old', enc(old))
806 part.addparam('new', enc(new))
806 part.addparam('new', enc(new))
807 action = 'update'
807 action = 'update'
808 if not old:
808 if not old:
809 action = 'export'
809 action = 'export'
810 elif not new:
810 elif not new:
811 action = 'delete'
811 action = 'delete'
812 part2book.append((part.id, book, action))
812 part2book.append((part.id, book, action))
813 pushop.pkfailcb[part.id] = handlefailure
813 pushop.pkfailcb[part.id] = handlefailure
814
814
815 def handlereply(op):
815 def handlereply(op):
816 ui = pushop.ui
816 ui = pushop.ui
817 for partid, book, action in part2book:
817 for partid, book, action in part2book:
818 partrep = op.records.getreplies(partid)
818 partrep = op.records.getreplies(partid)
819 results = partrep['pushkey']
819 results = partrep['pushkey']
820 assert len(results) <= 1
820 assert len(results) <= 1
821 if not results:
821 if not results:
822 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
822 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
823 else:
823 else:
824 ret = int(results[0]['return'])
824 ret = int(results[0]['return'])
825 if ret:
825 if ret:
826 ui.status(bookmsgmap[action][0] % book)
826 ui.status(bookmsgmap[action][0] % book)
827 else:
827 else:
828 ui.warn(bookmsgmap[action][1] % book)
828 ui.warn(bookmsgmap[action][1] % book)
829 if pushop.bkresult is not None:
829 if pushop.bkresult is not None:
830 pushop.bkresult = 1
830 pushop.bkresult = 1
831 return handlereply
831 return handlereply
832
832
833
833
834 def _pushbundle2(pushop):
834 def _pushbundle2(pushop):
835 """push data to the remote using bundle2
835 """push data to the remote using bundle2
836
836
837 The only currently supported type of data is changegroup but this will
837 The only currently supported type of data is changegroup but this will
838 evolve in the future."""
838 evolve in the future."""
839 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
839 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
840 pushback = (pushop.trmanager
840 pushback = (pushop.trmanager
841 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
841 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
842
842
843 # create reply capability
843 # create reply capability
844 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
844 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
845 allowpushback=pushback))
845 allowpushback=pushback))
846 bundler.newpart('replycaps', data=capsblob)
846 bundler.newpart('replycaps', data=capsblob)
847 replyhandlers = []
847 replyhandlers = []
848 for partgenname in b2partsgenorder:
848 for partgenname in b2partsgenorder:
849 partgen = b2partsgenmapping[partgenname]
849 partgen = b2partsgenmapping[partgenname]
850 ret = partgen(pushop, bundler)
850 ret = partgen(pushop, bundler)
851 if callable(ret):
851 if callable(ret):
852 replyhandlers.append(ret)
852 replyhandlers.append(ret)
853 # do not push if nothing to push
853 # do not push if nothing to push
854 if bundler.nbparts <= 1:
854 if bundler.nbparts <= 1:
855 return
855 return
856 stream = util.chunkbuffer(bundler.getchunks())
856 stream = util.chunkbuffer(bundler.getchunks())
857 try:
857 try:
858 try:
858 try:
859 reply = pushop.remote.unbundle(stream, ['force'], 'push')
859 reply = pushop.remote.unbundle(stream, ['force'], 'push')
860 except error.BundleValueError as exc:
860 except error.BundleValueError as exc:
861 raise error.Abort(_('missing support for %s') % exc)
861 raise error.Abort(_('missing support for %s') % exc)
862 try:
862 try:
863 trgetter = None
863 trgetter = None
864 if pushback:
864 if pushback:
865 trgetter = pushop.trmanager.transaction
865 trgetter = pushop.trmanager.transaction
866 op = bundle2.processbundle(pushop.repo, reply, trgetter)
866 op = bundle2.processbundle(pushop.repo, reply, trgetter)
867 except error.BundleValueError as exc:
867 except error.BundleValueError as exc:
868 raise error.Abort(_('missing support for %s') % exc)
868 raise error.Abort(_('missing support for %s') % exc)
869 except bundle2.AbortFromPart as exc:
869 except bundle2.AbortFromPart as exc:
870 pushop.ui.status(_('remote: %s\n') % exc)
870 pushop.ui.status(_('remote: %s\n') % exc)
871 raise error.Abort(_('push failed on remote'), hint=exc.hint)
871 raise error.Abort(_('push failed on remote'), hint=exc.hint)
872 except error.PushkeyFailed as exc:
872 except error.PushkeyFailed as exc:
873 partid = int(exc.partid)
873 partid = int(exc.partid)
874 if partid not in pushop.pkfailcb:
874 if partid not in pushop.pkfailcb:
875 raise
875 raise
876 pushop.pkfailcb[partid](pushop, exc)
876 pushop.pkfailcb[partid](pushop, exc)
877 for rephand in replyhandlers:
877 for rephand in replyhandlers:
878 rephand(op)
878 rephand(op)
879
879
880 def _pushchangeset(pushop):
880 def _pushchangeset(pushop):
881 """Make the actual push of changeset bundle to remote repo"""
881 """Make the actual push of changeset bundle to remote repo"""
882 if 'changesets' in pushop.stepsdone:
882 if 'changesets' in pushop.stepsdone:
883 return
883 return
884 pushop.stepsdone.add('changesets')
884 pushop.stepsdone.add('changesets')
885 if not _pushcheckoutgoing(pushop):
885 if not _pushcheckoutgoing(pushop):
886 return
886 return
887 pushop.repo.prepushoutgoinghooks(pushop)
887 pushop.repo.prepushoutgoinghooks(pushop)
888 outgoing = pushop.outgoing
888 outgoing = pushop.outgoing
889 unbundle = pushop.remote.capable('unbundle')
889 unbundle = pushop.remote.capable('unbundle')
890 # TODO: get bundlecaps from remote
890 # TODO: get bundlecaps from remote
891 bundlecaps = None
891 bundlecaps = None
892 # create a changegroup from local
892 # create a changegroup from local
893 if pushop.revs is None and not (outgoing.excluded
893 if pushop.revs is None and not (outgoing.excluded
894 or pushop.repo.changelog.filteredrevs):
894 or pushop.repo.changelog.filteredrevs):
895 # push everything,
895 # push everything,
896 # use the fast path, no race possible on push
896 # use the fast path, no race possible on push
897 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
897 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
898 cg = changegroup.getsubset(pushop.repo,
898 cg = changegroup.getsubset(pushop.repo,
899 outgoing,
899 outgoing,
900 bundler,
900 bundler,
901 'push',
901 'push',
902 fastpath=True)
902 fastpath=True)
903 else:
903 else:
904 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
904 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
905 bundlecaps)
905 bundlecaps)
906
906
907 # apply changegroup to remote
907 # apply changegroup to remote
908 if unbundle:
908 if unbundle:
909 # local repo finds heads on server, finds out what
909 # local repo finds heads on server, finds out what
910 # revs it must push. once revs transferred, if server
910 # revs it must push. once revs transferred, if server
911 # finds it has different heads (someone else won
911 # finds it has different heads (someone else won
912 # commit/push race), server aborts.
912 # commit/push race), server aborts.
913 if pushop.force:
913 if pushop.force:
914 remoteheads = ['force']
914 remoteheads = ['force']
915 else:
915 else:
916 remoteheads = pushop.remoteheads
916 remoteheads = pushop.remoteheads
917 # ssh: return remote's addchangegroup()
917 # ssh: return remote's addchangegroup()
918 # http: return remote's addchangegroup() or 0 for error
918 # http: return remote's addchangegroup() or 0 for error
919 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
919 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
920 pushop.repo.url())
920 pushop.repo.url())
921 else:
921 else:
922 # we return an integer indicating remote head count
922 # we return an integer indicating remote head count
923 # change
923 # change
924 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
924 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
925 pushop.repo.url())
925 pushop.repo.url())
926
926
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Pulls the remote's phase data, applies the relevant movements to the
    local repo (publishing common heads, etc.) and, when changesets were
    pushed outside of bundle2, pushes outdated draft heads to public on the
    remote through individual pushkey calls."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # remote publishes everything: all common heads turn public
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
982
982
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # No transaction manager means the repo is not locked: we must not
    # change any phase. We still tell the user which phase updates were
    # skipped, when any node would actually have moved.
    repo = pushop.repo
    wouldmove = [node for node in nodes if phase < repo[node].phase()]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n')
                         % phases.phasenames[phase])
999
999
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remote = pushop.remote
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        pushop.repo.ui.warn(_('failed to push some obsolete markers!\n'))
1018
1018
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for name, old, new in pushop.outbookmarks:
        # classify the change so we can pick the matching message pair
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        success, failure = bookmsgmap[action]
        if remote.pushkey('bookmarks', name, old, new):
            ui.status(success % name)
        else:
            ui.warn(failure % name)
    # discovery can have set the value from invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
1040
1040
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set by pull() once the lock is held)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is the default; _forcebundle1 lists the reasons to
        # fall back to the legacy exchange protocol
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1111
1111
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily created; stays None until transaction() is first called
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr is None:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1141
1141
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # local-to-local pull: refuse early if the source repo uses
        # features this repo cannot understand
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each legacy step below checks pullop.stepsdone and skips work
        # that bundle2 already performed
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release the transaction before the lock so hooks and rollback
        # run while the repo is still locked
        pullop.trmanager.release()
        lock.release()

    return pullop
1196
1196
# list of steps to perform discovery before pull (populated by the
# pulldiscovery() decorator; steps run in registration order)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1204
1204
def pulldiscovery(stepname):
    """decorator registering a function as a pre-pull discovery step

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list, so declaration order is the
    execution order (this may matter).

    Only use this decorator for a brand new step; to wrap an existing step
    from an extension, modify the pulldiscovery dictionary directly."""
    def register(func):
        # each step name may only be registered once through this decorator
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1220
1220
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # execute the registered steps in their declaration order
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1226
1226
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data already known, nothing to fetch
        return
    usingbundle2 = (pullop.canusebundle2
                    and 'listkeys' in pullop.remotebundle2caps)
    if usingbundle2:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1240
1240
1241
1241
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; may grow to handle all
    discovery at some point. Fills in pullop.common, pullop.fetch and
    pullop.rheads."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    # nodemap of the unfiltered repo: lets us recognize nodes that exist
    # locally but are hidden by the current filter
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # node exists locally (possibly hidden); treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every unknown head was actually local-but-hidden: nothing to
            # fetch after all
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1279
1279
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # remote and local agree on at least one obsmarkers format:
            # fetch markers in the same bundle2 round trip
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1347
1347
1348 def _pullbundle2extraprepare(pullop, kwargs):
1348 def _pullbundle2extraprepare(pullop, kwargs):
1349 """hook function so that extensions can extend the getbundle call"""
1349 """hook function so that extensions can extend the getbundle call"""
1350 pass
1350 pass
1351
1351
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Skipped entirely when bundle2 already handled the changegroup step.
    Picks the richest wire-protocol command the remote supports
    (getbundle > changegroupsubset > changegroup)."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # full pull against an old server lacking getbundle
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1384
1384
def _pullphase(pullop):
    """fetch phase information from the remote and apply it locally"""
    # nothing to do when an earlier step (e.g. bundle2) already handled phases
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1391
1391
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    serverpublishes = bool(remotephases.get('publishing', False))
    if remotephases and not serverpublishes:
        # The remote understands phases and is non-publishing: trust the
        # phase data it advertised for the changesets we pulled.
        publicheads, _dropped = phases.analyzeremotephases(pullop.repo,
                                                           pullop.pulledsubset,
                                                           remotephases)
        draftheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets should be
        # seen as public.
        publicheads = pullop.pulledsubset
        draftheads = []
    unfi = pullop.repo.unfiltered()
    getphase = unfi._phasecache.phase
    torev = unfi.changelog.nodemap.get

    # advance the public boundary first, then the draft one, skipping in
    # each case the changesets that already are in a low enough phase
    for targetphase, heads in ((phases.public, publicheads),
                               (phases.draft, draftheads)):
        newheads = [n for n in heads if getphase(unfi, torev(n)) > targetphase]
        if newheads:
            tr = pullop.gettransaction()
            phases.advanceboundary(pullop.repo, tr, targetphase, newheads)
1426
1426
def _pullbookmarks(pullop):
    """update the local bookmarks from the remote bookmark information"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    bookmod.updatefromremote(localrepo.ui, localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1438
1438
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    Returns the pull transaction when one had to be created, so the calling
    code knows a new transaction is now open; returns None otherwise.

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return None
    pullop.stepsdone.add('obsmarkers')
    if not obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        return None
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' not in remoteobs:
        return None
    tr = pullop.gettransaction()
    markers = []
    # markers are spread over 'dump0', 'dump1', ... pushkey entries
    for key in sorted(remoteobs, reverse=True):
        if key.startswith('dump'):
            blob = base85.b85decode(remoteobs[key])
            version, decoded = obsolete._readmarkers(blob)
            markers += decoded
    if markers:
        pullop.repo.obsstore.add(tr, markers)
    pullop.repo.invalidatevolatilesets()
    return tr
1466
1466
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urlreq.quote(capsblob)])
1473
1473
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated by the @getbundle2partsgenerator decorator below.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1481
1481
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is registered in the step -> function mapping and
    inserted into the ordered list of steps: appended by default, or at
    position ``idx`` when one is given. Beware that decorated functions will
    be added in order (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1500
1500
def bundle2requested(bundlecaps):
    """tell whether the given capabilities ask for a bundle2 (HG2x) stream"""
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1505
1505
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    if not bundle2requested(bundlecaps):
        # bundle10 case: only a bare changegroup can be transferred
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case: decode the bundle2 capabilities advertised by the client
    b2caps = {}
    for cap in bundlecaps:
        if cap.startswith('bundle2='):
            blob = urlreq.unquote(cap[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # let every registered step contribute its part(s) to the bundle
    for stepname in getbundle2partsorder:
        partgen = getbundle2partsmapping[stepname]
        partgen(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
                **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1550
1550
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        return
    # negotiate the changegroup version: default to '01', upgrade to the
    # highest version both sides support when the client advertises any
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    outgoing = changegroup.computeoutgoing(repo, heads, common)
    cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                            bundlecaps=bundlecaps,
                                            version=version)
    if not cg:
        return
    part = bundler.newpart('changegroup', data=cg)
    if cgversions:
        part.addparam('version', version)
    part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')
1578
1578
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1589
1589
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # only markers relevant to ancestors of the requested heads matter
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    buildobsmarkerspart(bundler, markers)
1601
1601
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Only send the part when changesets are being exchanged and the
    # client advertised support for it.
    if not kwargs.get('cg', True) or 'hgtagsfnodes' not in b2caps:
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)
    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # Building the full payload up front is cheap (40 bytes per head, so
    # even 1M heads is only 40MB) and lets us skip the part entirely when
    # the cache holds no entries for these heads.
    payload = []
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            payload.append(node)
            payload.append(fnode)

    if payload:
        bundler.newpart('hgtagsfnodes', data=''.join(payload))
1644
1644
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1658
1658
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    # set to a callable when bundle2 output needs to be replayed to the client
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        # served over HTTP(S): always capture output so it can be relayed
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # having a 'params' attribute identifies cg as a bundle2 stream
            r = None
            try:
                def gettransaction():
                    # lazily take the locks and open the transaction on first
                    # use; results are stashed in lockandtr so the enclosing
                    # finally can release them in reverse order
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # grab the reply bundle even on failure so any captured
                    # output can still be forwarded to the client
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # BaseException (not Exception): KeyboardInterrupt etc. must
                # also be tagged so callers know the bundle2 stream was active
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage already-generated reply parts and keep recording
                    # further output onto the exception for later relay
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # plain bundle1 changegroup: a simple repo lock is enough
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1727
1727
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # feature can be disabled client-side
    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # a pull limited to specific heads cannot be seeded from a full bundle
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    manifest = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    candidates = parseclonebundlesmanifest(repo, manifest)
    if not candidates:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    candidates = filterclonebundleentries(repo, candidates)
    if not candidates:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    candidates = sortclonebundleentries(repo.ui, candidates)

    url = candidates[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
1791
1791
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        # first field is the URL; the rest are URL-encoded key=value pairs
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # unparsable specs are kept verbatim; filtering happens
                    # later in filterclonebundleentries
                    pass

        entries.append(attrs)

    return entries
1827
1827
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    ui = repo.ui
    usable = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            # Reject entries whose bundle spec we cannot parse or apply.
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                ui.debug('filtering %s because unsupported bundle '
                         'spec: %s\n' % (entry['URL'], str(e)))
                continue

        # Servers may flag bundles that require TLS SNI to fetch.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            ui.debug('filtering %s because SNI not supported\n' %
                     entry['URL'])
            continue

        usable.append(entry)

    return usable
1860
1860
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by the user's preferences.

    ``ui.clonebundleprefers`` is a list of ``key=value`` strings. Entries
    matching earlier preferences sort first; entries that cannot be
    distinguished keep their original manifest order (the sort is stable).
    Returns a new list; ``entries`` is not modified.
    """
    # Needed for cmp_to_key below; available in Python 2.7 and 3.x.
    import functools

    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function: classic three-way comparator over the ordered
    # preference list.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    # The ``cmp=`` keyword to sorted() only exists on Python 2;
    # functools.cmp_to_key preserves the exact comparator semantics
    # while working on both Python 2.7 and Python 3.
    return sorted(entries, key=functools.cmp_to_key(compareentry))
1904
1904
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url``, recognizes the payload type, and applies it inside a
    repo lock and a single transaction. Returns True if the bundle was
    applied and the transaction committed; returns False when the fetch
    fails with an HTTP/URL error (a warning is printed). Other exceptions
    propagate and roll back the transaction.
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # Dispatch on payload type: bundle2 stream, raw stream
                # clone data, or a legacy changegroup bundle.
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                # Commit before the finally-block's release() so the
                # applied data persists.
                tr.close()
                return True
            except urlerr.httperror as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urlerr.urlerror as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            # Fetch failed; caller may fall back to a regular pull.
            return False
        finally:
            # No-op after a successful tr.close(); rolls back otherwise.
            tr.release()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now