##// END OF EJS Templates
exchange: use cg?unpacker.apply() instead of changegroup.addchangegroup()
Augie Fackler -
r26700:dbc3d945 default
parent child Browse files
Show More
@@ -1,1801 +1,1800
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib, urllib2
10 import errno, urllib, urllib2
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14 import streamclone
14 import streamclone
15 import sslutil
15 import sslutil
16 import tags
16 import tags
17 import url as urlmod
17 import url as urlmod
18
18
19 # Maps bundle compression human names to internal representation.
19 # Maps bundle compression human names to internal representation.
20 _bundlespeccompressions = {'none': None,
20 _bundlespeccompressions = {'none': None,
21 'bzip2': 'BZ',
21 'bzip2': 'BZ',
22 'gzip': 'GZ',
22 'gzip': 'GZ',
23 }
23 }
24
24
25 # Maps bundle version human names to changegroup versions.
25 # Maps bundle version human names to changegroup versions.
26 _bundlespeccgversions = {'v1': '01',
26 _bundlespeccgversions = {'v1': '01',
27 'v2': '02',
27 'v2': '02',
28 'bundle2': '02', #legacy
28 'bundle2': '02', #legacy
29 }
29 }
30
30
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 2-tuple of (compression, version). Compression will be ``None``
    if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        if spec in _bundlespeccompressions:
            compression = spec
            # generaldelta repos need the v2 changegroup format to preserve
            # their delta base choices; older repos default to v1.
            version = 'v1'
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            # historical default compression for bare version specs
            compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    if not externalnames:
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version
100
100
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler for the bundle read from file object ``fh``.

    ``fname`` is the bundle's name, used in error messages; when empty or
    None, "stream" is used instead. When ``vfs`` is given (and the header
    is a regular bundle header), ``fname`` is joined onto the vfs root for
    reporting purposes.

    Returns a ``cg1unpacker`` for HG10 bundles or a bundle2 unbundler for
    HG2x bundles. Raises ``error.Abort`` on unrecognized magic or version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
    # A headerless stream starting with NUL is assumed to be an
    # uncompressed HG10 changegroup; re-prepend the consumed bytes.
    if not header.startswith('HG') and header.startswith('\0'):
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # the compression algorithm follows the HG10 header
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
126
126
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if markers:
        # pick the highest obsmarker format version shared with the remote
        remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
        version = obsolete.commonversion(remoteversions)
        if version is None:
            raise ValueError('bundler do not support common obsmarker format')
        stream = obsolete.encodemarkers(markers, True, version=version)
        return bundler.newpart('obsmarkers', data=stream)
    return None
141
141
142 def _canusebundle2(op):
142 def _canusebundle2(op):
143 """return true if a pull/push can use bundle2
143 """return true if a pull/push can use bundle2
144
144
145 Feel free to nuke this function when we drop the experimental option"""
145 Feel free to nuke this function when we drop the experimental option"""
146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
147 and op.remote.capable('bundle2'))
147 and op.remote.capable('bundle2'))
148
148
149
149
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
261
261
262
262
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # remote cannot accept a pushed bundle; we must lock it and
            # write into it directly (local filesystem, old ssh servers)
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
343
343
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        # each step name may be registered only once via this decorator
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
367
367
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    # steps run in registration order; extensions may have wrapped entries
    # in pushdiscoverymapping
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)
373
373
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
386
386
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
435
435
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push"""
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repo.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
446
446
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compute which bookmark changes should be pushed to the remote.

    Compares local bookmarks against the remote's ``bookmarks`` pushkey
    namespace and appends ``(name, old-remote-id, new-id)`` tuples to
    ``pushop.outbookmarks`` for updates, additions, overwrites and
    deletions.  Explicitly requested bookmarks that exist on neither
    side trigger a warning and set ``pushop.bkresult = 2``.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        # only advance bookmarks that point within the pushed set
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
497
497
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push.  Unless
    ``pushop.force`` is set, aborts when an outgoing head is obsolete
    or troubled, and runs head-checking discovery to prevent
    unexpected new remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

        # internal config: bookmarks.pushing
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
534
534
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
542
542
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def dec(func):
        # each step name may be registered only once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            # allow callers to splice a step at a specific position
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
561
561
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if not pushop.force:
        # ask the server to abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
569
569
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # pick the highest changegroup version understood by both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
611
611
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        # map a failed pushkey part id back to the head it was moving
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
652
652
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part to the outgoing bundle2.

    Skipped when the step was already performed or when no marker
    format version is common to both sides.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)
664
664
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty "old" means the bookmark is new remotely; empty "new"
        # means it is being deleted
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
716
716
717
717
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # let the part generator that created this part report the failure
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
760
760
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
809
809
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
865
865
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
882
882
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
901
901
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty "old" means the bookmark is new remotely; empty "new"
        # means it is being deleted
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
923
923
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set by pull(); see gettransaction())
        self.trmanager = None
        # set of common changesets between local and remote before pull
        # (filled in by the discovery step)
        self.common = None
        # set of pulled heads (filled in by the discovery step)
        self.rheads = None
        # list of missing changesets to fetch remotely (discovery step)
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # set of step names already done, so steps can be skipped when a
        # richer mechanism (e.g. bundle2) already handled them
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # whether the bundle2 protocol can be used with this remote
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
993
993
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        # repository the transaction will be opened on
        self.repo = repo
        # operation name, recorded in hook arguments (e.g. 'pull')
        self.source = source
        # remote url, recorded in hook arguments and the transaction name
        self.url = url
        # lazily-created transaction; None until transaction() is called
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            # password is stripped from the url used as the transaction name
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1023
1023
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # pulling from a local repo: abort early if the source needs
        # repository features this repo does not support
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        # each _pull* step below checks pullop.stepsdone and no-ops if an
        # earlier step (e.g. bundle2) already covered it
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release() after close() is a no-op on a committed transaction;
        # on error it rolls back before the repo lock is dropped
        pullop.trmanager.release()
        lock.release()

    return pullop
1078
1078
# list of names of steps to perform discovery before pull, in order
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1086
1086
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is added to the step -> function mapping and its
    name appended to the list of steps. Beware that decorated functions will
    be added in order (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # refuse to silently overwrite an existing step
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1102
1102
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # steps run in registration order (see the pulldiscovery decorator)
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1108
1108
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    # nothing to do when the data is already known, or when bundle2 with
    # listkeys support will carry the bookmarks inside the bundle itself
    # (all known bundle2 servers now support listkeys, but lets be nice
    # with new implementations)
    alreadyknown = pullop.remotebookmarks is not None
    viabundle2 = (pullop.canusebundle2
                  and 'listkeys' in pullop.remotebundle2caps)
    if alreadyknown or viabundle2:
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1122
1122
1123
1123
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; may change to handle all
    discovery at some point.

    Fills in pullop.common, pullop.fetch and pullop.rheads."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # node known locally (possibly hidden): treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head was already known locally: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1161
1161
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    # arguments for the remote getbundle() call
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # request obsolescence markers only if both sides share a
            # marker format version
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions tweak the getbundle arguments before the request
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # fold the per-part changegroup results into a single return code
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1229
1229
1230 def _pullbundle2extraprepare(pullop, kwargs):
1230 def _pullbundle2extraprepare(pullop, kwargs):
1231 """hook function so that extensions can extend the getbundle call"""
1231 """hook function so that extensions can extend the getbundle call"""
1232 pass
1232 pass
1233
1233
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the most capable protocol the remote supports, from newest
    # (getbundle) down to the legacy changegroup/changegroupsubset commands
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    # apply the changegroup (cg is an unpacker object) and record its
    # return code as the pull result
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1267
1266
def _pullphase(pullop):
    # Get remote phases data from remote and apply it locally
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1274
1273
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the pushkey-style mapping obtained from the remote's
    'phases' namespace. Heads seen as public remotely are advanced to public
    locally; the rest of the pulled subset is advanced to at least draft."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups to locals before the comprehensions below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1309
1308
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1321
1320
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the calling
    code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is the first chunk of marker data; its absence means the
        # remote has no markers to send
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    # marker chunks are base85-encoded over the pushkey wire
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1345
1344
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise HG20 plus our url-quoted bundle2 capability blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1352
1351
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1360
1359
def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded in the step -> function mapping and
    the step name is inserted into the ordered step list: appended at the
    end when no index is given, otherwise inserted at ``idx``. Beware that
    registration order may matter.

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def register(func):
        # refuse double registration of the same step name
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1379
1378
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        # any capability starting with 'HG2' signals bundle2 support
        usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 understands nothing but a changegroup; any leftover
        # getbundle argument is therefore an error
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    # decode the client's advertised bundle2 capabilities, if any
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # make heads/common visible to the per-part generators below
    kwargs['heads'] = heads
    kwargs['common'] = common

    # let every registered step contribute its part(s), in declared order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1426
1425
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        return

    # Negotiate the changegroup version against the client's advertised
    # list, when one was sent.
    version = None
    getcgkwargs = {}
    advertised = b2caps.get('changegroup')
    if advertised:  # 3.1 and 3.2 ship with an empty value
        usable = [v for v in advertised if v in changegroup.packermap]
        if not usable:
            raise ValueError(_('no common changegroup version'))
        version = getcgkwargs['version'] = max(usable)

    outgoing = changegroup.computeoutgoing(repo, heads, common)
    cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                            bundlecaps=bundlecaps,
                                            **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)),
                      mandatory=False)
1452
1451
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one part per requested pushkey namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1463
1462
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # only markers relevant to the ancestors of the requested heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    buildobsmarkerspart(bundler, markers)
1475
1474
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Only emit the part when changesets are being exchanged and the
    # client advertises support for it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)
    if not outgoing.missingheads:
        return

    fnodescache = tags.hgtagsfnodescache(repo.unfiltered())

    # .hgtags fnodes only matter for head changesets; transferring values
    # for every known node would buy little. The payload is tiny (40 bytes
    # per head, so even 1M heads is only 40MB), so buffering it in a plain
    # list instead of a generator is fine and lets us skip the part
    # entirely when the cache has no entries.
    payload = []
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = fnodescache.getfnode(node, computemissing=False)
        if fnode is not None:
            payload.append(node)
            payload.append(fnode)

    if payload:
        bundler.newpart('hgtagsfnodes', data=''.join(payload))
1518
1517
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = util.sha1(''.join(sorted(current))).digest()
    acceptable = (their_heads == ['force']
                  or their_heads == current
                  or their_heads == ['hashed', digest])
    if not acceptable:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1532
1531
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: process parts under a lazily-created
            # transaction so side-effect-free bundles need no lock at all
            r = None
            try:
                def gettransaction():
                    # create wlock/lock/transaction on first use only
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # capture the reply bundle even on failure so any
                    # buffered output can be attached to it below
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # tag the exception so callers know bundle2 was in flight,
                # and salvage any already-generated reply output
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # legacy changegroup (HG10): apply directly under the repo lock
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1601
1600
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # The feature must be explicitly enabled.
    if not repo.ui.configbool('experimental', 'clonebundles', False):
        return

    # Only sensible for full clones (no specific heads requested) against
    # a server advertising the capability.
    if pullop.heads or not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
        return

    # The bundle failed to apply. We abort by default to avoid the
    # thundering herd of clients flooding a server that was expecting
    # expensive clone load to be offloaded.
    if repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config experimental.clonebundles=false"'))
1661
1660
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        # first whitespace-separated field is the URL; the rest are
        # URL-encoded key=value attribute pairs
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Decompose BUNDLESPEC into its components so client-side
            # preferences can target a single component of the spec.
            if key == 'BUNDLESPEC':
                try:
                    comp, version = parsebundlespec(repo, value,
                                                    externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # unparseable specs simply don't get decomposed
                    pass

        entries.append(attrs)

    return entries
1697
1696
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    usable = []
    for entry in entries:
        # reject entries whose advertised bundle spec we cannot handle
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        # reject SNI-requiring entries when our TLS stack lacks SNI
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        usable.append(entry)

    return usable
1730
1729
def sortclonebundleentries(ui, entries):
    # experimental config: experimental.clonebundleprefers
    prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    # each preference is a "KEY=VALUE" string; split once into pairs
    prefers = [p.split('=', 1) for p in prefers]

    def compareentry(a, b):
        # cmp-style comparator honoring the ordered preference list
        for prefkey, prefvalue in prefers:
            aval = a.get(prefkey)
            bval = b.get(prefkey)

            # An entry matching the preference exactly beats one lacking
            # the attribute entirely.
            if aval is not None and bval is None and aval == prefvalue:
                return -1

            if bval is not None and aval is None and bval == prefvalue:
                return 1

            # With the attribute absent on either side there is nothing
            # to compare; move on to the next preference.
            if aval is None or bval is None:
                continue

            # Identical values cannot break the tie either.
            if aval == bval:
                continue

            # Exact matches sort first.
            if aval == prefvalue:
                return -1
            if bval == prefvalue:
                return 1

        # No preference discriminated the entries; preserve manifest order.
        return 0

    return sorted(entries, cmp=compareentry)
1775
1774
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied (the transaction
    is closed), False when fetching failed; network errors are reported
    via ``ui.warn`` rather than raised.
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # bundle2 streams go through the part processor; legacy
                # changegroups are applied directly
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason)

            return False
        finally:
            # release() is a no-op after a successful close() above,
            # otherwise it rolls the transaction back
            tr.release()
    finally:
        lock.release()
1800 lock.release()
General Comments 0
You need to be logged in to leave comments. Login now