# Provenance: Mercurial exchange.py, changeset r26643:d2e16419 (default),
# "clonebundle: support bundle2" by Gregory Szorc.
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib, urllib2
10 import errno, urllib, urllib2
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14 import streamclone
14 import streamclone
15 import tags
15 import tags
16 import url as urlmod
16 import url as urlmod
17
17
18 # Maps bundle compression human names to internal representation.
18 # Maps bundle compression human names to internal representation.
19 _bundlespeccompressions = {'none': None,
19 _bundlespeccompressions = {'none': None,
20 'bzip2': 'BZ',
20 'bzip2': 'BZ',
21 'gzip': 'GZ',
21 'gzip': 'GZ',
22 }
22 }
23
23
24 # Maps bundle version human names to changegroup versions.
24 # Maps bundle version human names to changegroup versions.
25 _bundlespeccgversions = {'v1': '01',
25 _bundlespeccgversions = {'v1': '01',
26 'v2': '02',
26 'v2': '02',
27 'bundle2': '02', #legacy
27 'bundle2': '02', #legacy
28 }
28 }
29
29
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into its parts.

    A bundle specification has the form ``<compression>-<type>`` where
    ``<compression>`` is a supported compression name and ``<type>`` is
    (currently) a bundle version string. Specifications are stable over
    time so that bundles written by newer Mercurial versions remain
    readable by older ones.

    When ``strict`` is True (the default) the ``<compression>`` prefix is
    mandatory; otherwise either half may be given alone and the missing
    half is filled in with a default.

    Returns a 2-tuple of (compression, version); compression is ``None``
    when not in strict mode and no compression was specified.

    Raises ``InvalidBundleSpecification`` for syntactically malformed
    input and ``UnsupportedBundleSpecification`` for unrecognized
    compression or version names.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)
        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)
        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Only one half was given; figure out which one it is and supply a
        # default for the other (only legal outside of strict mode).
        assert not strict
        if spec in _bundlespeccompressions:
            compression = spec
            # generaldelta repos default to the newer changegroup format.
            version = 'v2' if 'generaldelta' in repo.requirements else 'v1'
        elif spec in _bundlespeccgversions:
            compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    return _bundlespeccompressions[compression], _bundlespeccgversions[version]
95
95
def readbundle(ui, fh, fname, vfs=None):
    """Identify the bundle in *fh* and return the matching unpacker.

    Reads the 4-byte magic header from *fh* and dispatches to a
    changegroup-v1 unpacker ('HG10') or a bundle2 unbundler ('HG2x').
    A headerless stream (starting with a NUL byte) is treated as an
    uncompressed HG10 changegroup. Aborts on anything unrecognized.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # Headerless data: put the consumed bytes back in front of the
        # stream and treat it as an uncompressed HG10 changegroup.
        if header.startswith('\0') and not header.startswith('HG'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[:2]
    version = header[2:]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # The compression algorithm follows the header on the wire.
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
121
121
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    Returns the new part, or None when <markers> is empty (no part is
    created in that case).
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None

    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
136
136
137 def _canusebundle2(op):
137 def _canusebundle2(op):
138 """return true if a pull/push can use bundle2
138 """return true if a pull/push can use bundle2
139
139
140 Feel free to nuke this function when we drop the experimental option"""
140 Feel free to nuke this function when we drop the experimental option"""
141 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
141 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
142 and op.remote.capable('bundle2'))
142 and op.remote.capable('bundle2'))
143
143
144
144
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed by name
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired? (None until we tried)
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through
        # bundle2, so legacy code paths can skip them)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, as (name, old-id, new-id) tuples
        self.outbookmarks = []
        # transaction manager (set once a local lock is held)
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # (success message template, failure message template) per action
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
256
256
257
257
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # Pushing over the filesystem: make sure the destination actually
        # supports every feature this repository requires.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        # Only a permission error is tolerated; anything else is a real
        # failure and must propagate.
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # A transaction is only opened when we hold the local lock, so
            # any pushed-back data can be written safely.
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # Legacy path (addchangegroup): we must lock the remote side.
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # The _push* helpers below check pushop.stepsdone and skip work
            # already performed through bundle2.
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # Release in reverse order of acquisition: transaction, then local
        # store lock, then working-directory lock.
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
338
338
# list of steps to perform discovery before push, in execution order
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
346
346
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The decorated function is recorded under ``stepname`` in the
    step -> function mapping and the step name is appended to the list of
    steps. Beware that decorated functions will be registered in
    definition order (this may matter).

    You can only use this decorator for a new step, if you want to wrap a
    step from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # refuse to silently overwrite an existing step
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
362
362
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
368
368
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
381
381
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)

    Fills in pushop.outdatedphases (used when the changeset push succeeds)
    and pushop.fallbackoutdatedphases (used when it fails).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # pheads is unused below; only the remote draft roots matter here.
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # on a non-publishing server, only public changesets are outdated
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
430
430
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the push, if any

    Does nothing unless obsmarker exchange is enabled locally, the local
    obsstore is non-empty and the remote advertises the 'obsolete'
    pushkey namespace.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
441
441
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and record the pending updates.

    Each entry appended to ``pushop.outbookmarks`` is a
    ``(bookmark, old-remote-node, new-node)`` tuple where an empty string
    stands for "absent" on that side.  ``pushop.bkresult`` is set to 2 when
    an explicitly requested bookmark exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # limit bookmark moves to the set of revisions being pushed
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    def _seen(name):
        # mark an explicitly requested bookmark as accounted for
        if name in explicit:
            explicit.remove(name)

    # bookmarks that moved forward locally
    for name, srcid, dstid in advsrc:
        _seen(name)
        if not ancestors or repo[srcid].rev() in ancestors:
            pushop.outbookmarks.append((name, dstid, srcid))
    # search added bookmark
    for name, srcid, dstid in addsrc:
        _seen(name)
        pushop.outbookmarks.append((name, '', srcid))
    # search for overwritten bookmark
    for name, srcid, dstid in advdst + diverge + differ:
        _seen(name)
        pushop.outbookmarks.append((name, dstid, srcid))
    # search for bookmark to delete
    for name, srcid, dstid in adddst:
        _seen(name)
        # treat as "deleted locally"
        pushop.outbookmarks.append((name, dstid, ''))
    # identical bookmarks shouldn't get reported
    for name, srcid, dstid in same:
        _seen(name)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
492
492
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before a push.

    Returns False when there is nothing to push.  Unless the push is
    forced, aborts when an outgoing head is obsolete or troubled and runs
    the new-head check.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if pushop.force:
        return True
    # if repo.obsstore == False --> no obsolete
    # then, save the iteration
    if unfi.obsstore:
        # this message are here for 80 char limit reason
        mso = _("push includes obsolete changeset: %s!")
        mst = {"unstable": _("push includes unstable changeset: %s!"),
               "bumped": _("push includes bumped changeset: %s!"),
               "divergent": _("push includes divergent changeset: %s!")}
        # If we are to push if there is at least one
        # obsolete or unstable changeset in missing, at
        # least one of the missinghead will be obsolete or
        # unstable. So checking heads only is ok
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(mso % ctx)
            if ctx.troubled():
                raise error.Abort(mst[ctx.troubles()[0]] % ctx)

    # internal config: bookmarks.pushing
    newbm = pushop.ui.configlist('bookmarks', 'pushing')
    discovery.checkheads(unfi, pushop.remote, outgoing,
                         pushop.remoteheads,
                         pushop.newbranch,
                         bool(pushop.incoming),
                         newbm)
    return True
529
529
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """Return a decorator registering a bundle2 part generator for a step.

    The decorated function is stored in ``b2partsgenmapping`` under
    ``stepname`` and the step name is recorded in ``b2partsgenorder`` --
    appended when ``idx`` is None, inserted at position ``idx`` otherwise.
    Decoration order therefore matters.

    Only use this for brand new steps; an extension wanting to wrap an
    existing step should patch ``b2partsgenmapping`` directly.
    """
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
556
556
557 def _pushb2ctxcheckheads(pushop, bundler):
557 def _pushb2ctxcheckheads(pushop, bundler):
558 """Generate race condition checking parts
558 """Generate race condition checking parts
559
559
560 Exists as an indepedent function to aid extensions
560 Exists as an indepedent function to aid extensions
561 """
561 """
562 if not pushop.force:
562 if not pushop.force:
563 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
563 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
564
564
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    Adds a 'changegroup' part (using the highest changegroup version both
    sides understand) and returns a reply handler that stores the
    addchangegroup result in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    advertised = b2caps.get('changegroup')
    if not advertised: # 3.1 and 3.2 ship with an empty value
        cgstream = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                      pushop.outgoing)
    else:
        # keep only the versions we can actually produce
        usable = [v for v in advertised if v in changegroup.packermap]
        if not usable:
            raise ValueError(_('no common changegroup version'))
        version = max(usable)
        cgstream = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                      pushop.outgoing,
                                                      version=version)
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if version is not None:
        cgpart.addparam('version', version)

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
606
606
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One 'pushkey' part is generated per outdated remote head to be turned
    public.  Returns a reply handler that warns when the server ignored or
    refused an update; hard pushkey failures are routed through
    ``pushop.pkfailcb`` and turned into an abort.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # membership test written as ``not in`` for consistency with
    # _pushb2bookmarks (was the less idiomatic ``not 'pushkey' in``)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        # map the failed part id back to the node it was advancing
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
647
647
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part to the outgoing bundle2.

    Skipped when the two sides share no common marker format version or
    when there are no markers to send.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format understood by both sides
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
659
659
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One 'pushkey' part is generated per outgoing bookmark change.  The
    returned reply handler reports, per bookmark, whether the server
    accepted, rejected or ignored the update.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    pending = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # turn a pushkey part failure into a user-facing abort
        failedid = int(exc.partid)
        for partid, book, action in pending:
            if partid == failedid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        pending.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in pending:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            elif int(results[0]['return']):
                ui.status(bookmsgmap[action][0] % book)
            else:
                ui.warn(bookmsgmap[action][1] % book)
                if pushop.bkresult is not None:
                    pushop.bkresult = 1
    return handlereply
711
711
712
712
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    A single bundle2 is assembled from the registered part generators
    (``b2partsgenorder``/``b2partsgenmapping``) and sent in one round
    trip; any reply handlers the generators returned are then run against
    the server response.
    """
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for stepname in b2partsgenorder:
        handler = b2partsgenmapping[stepname](pushop, bundler)
        if callable(handler):
            replyhandlers.append(handler)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate to the failure callback registered by the part generator
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
755
755
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) code path: the changegroup is applied through the
    'unbundle' wire command when the remote supports it, through
    addchangegroup otherwise.  The result lands in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    fastpathok = (pushop.revs is None
                  and not outgoing.excluded
                  and not pushop.repo.changelog.filteredrevs)
    if fastpathok:
        # push everything,
        # use the fast path, no race possible on push
        packer = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   packer,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        remoteheads = ['force'] if pushop.force else pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
804
804
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Applies the remote's phase data locally, then advances outdated remote
    heads to public through individual pushkey calls (unless a bundle2
    'phases' step already did so).
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    issubrepopush = pushop.ui.configbool('ui', '_usedassubrepo', False)
    if (issubrepopush
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
        return

    pheads, droots = phases.analyzeremotephases(pushop.repo, cheads,
                                                remotephases)
    ### Apply remote phase on local
    if remotephases.get('publishing', False):
        _localphasemove(pushop, cheads)
    else: # publish = False
        _localphasemove(pushop, pheads)
        _localphasemove(pushop, cheads, phases.draft)
    ### Apply local phase on remote

    if pushop.cgresult:
        if 'phases' in pushop.stepsdone:
            # phases already pushed though bundle2
            return
        outdated = pushop.outdatedphases
    else:
        outdated = pushop.fallbackoutdatedphases

    pushop.stepsdone.add('phases')

    # filter heads already turned public by the push
    outdated = [c for c in outdated if c.node() not in pheads]
    # fallback to independent pushkey command
    for newremotehead in outdated:
        ok = pushop.remote.pushkey('phases',
                                   newremotehead.hex(),
                                   str(phases.draft),
                                   str(phases.public))
        if not ok:
            pushop.ui.warn(_('updating %s to public failed!\n')
                           % newremotehead)
860
860
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    When no transaction manager is available the repo cannot be locked, so
    no phase is changed; the user is merely informed when a move would
    have happened.
    """
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
877
877
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Legacy (non-bundle2) path: markers are escaped into pushkey payloads
    and sent one key at a time; a warning is emitted if any key is
    refused.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    outcomes = [remote.pushkey('obsolete', key, '', remotedata[key])
                for key in sorted(remotedata, reverse=True)]
    if not all(outcomes):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
896
896
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Legacy (non-bundle2) path: each pending bookmark change is sent with
    an individual pushkey call; skipped entirely if the changegroup push
    failed or bundle2 already handled bookmarks.
    """
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
918
918
class pulloperation(object):
    """State holder for a single pull operation.

    Carries all pull-related state along with a handful of very common
    helper operations. A fresh instance should be created at the beginning
    of each pull and discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # a specific subset was requested: sync on exactly that subset
            return self.heads
        # everything possible was pulled: sync on everything common, plus
        # every remote head not already considered common
        known = set(self.common)
        subset = list(self.common)
        subset.extend(h for h in self.rheads if h not in known)
        return subset

    @util.propertycache
    def canusebundle2(self):
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
986
986
class transactionmanager(object):
    """Manage the life cycle of the transaction backing an exchange.

    The transaction is created lazily, on first request, and the
    appropriate hook arguments are installed when it is opened."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction; stays None until transaction() is used
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1016
1016
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **opargs)
    if pullop.remote.local():
        # refuse to pull from a local repo whose requirements we can't honor
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            raise error.Abort(
                _("required features are not"
                  " supported in the destination:"
                  " %s") % (', '.join(sorted(missing))))

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step below is a no-op if bundle2 already handled it
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
1071
1071
1072 # list of steps to perform discovery before pull
1072 # list of steps to perform discovery before pull
1073 pulldiscoveryorder = []
1073 pulldiscoveryorder = []
1074
1074
1075 # Mapping between step name and function
1075 # Mapping between step name and function
1076 #
1076 #
1077 # This exists to help extensions wrap steps if necessary
1077 # This exists to help extensions wrap steps if necessary
1078 pulldiscoverymapping = {}
1078 pulldiscoverymapping = {}
1079
1079
1080 def pulldiscovery(stepname):
1080 def pulldiscovery(stepname):
1081 """decorator for function performing discovery before pull
1081 """decorator for function performing discovery before pull
1082
1082
1083 The function is added to the step -> function mapping and appended to the
1083 The function is added to the step -> function mapping and appended to the
1084 list of steps. Beware that decorated function will be added in order (this
1084 list of steps. Beware that decorated function will be added in order (this
1085 may matter).
1085 may matter).
1086
1086
1087 You can only use this decorator for a new step, if you want to wrap a step
1087 You can only use this decorator for a new step, if you want to wrap a step
1088 from an extension, change the pulldiscovery dictionary directly."""
1088 from an extension, change the pulldiscovery dictionary directly."""
1089 def dec(func):
1089 def dec(func):
1090 assert stepname not in pulldiscoverymapping
1090 assert stepname not in pulldiscoverymapping
1091 pulldiscoverymapping[stepname] = func
1091 pulldiscoverymapping[stepname] = func
1092 pulldiscoveryorder.append(stepname)
1092 pulldiscoveryorder.append(stepname)
1093 return func
1093 return func
1094 return dec
1094 return dec
1095
1095
1096 def _pulldiscovery(pullop):
1096 def _pulldiscovery(pullop):
1097 """Run all discovery steps"""
1097 """Run all discovery steps"""
1098 for stepname in pulldiscoveryorder:
1098 for stepname in pulldiscoveryorder:
1099 step = pulldiscoverymapping[stepname]
1099 step = pulldiscoverymapping[stepname]
1100 step(pullop)
1100 step(pullop)
1101
1101
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """Fetch bookmark data early when bundle1 will be used.

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data already known (e.g. supplied by the caller)
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # bundle2 will carry bookmark data itself; all known bundle2
        # servers support listkeys, but be nice with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1115
1115
1116
1116
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """Discovery phase for the pull.

    Currently handles changeset discovery only; eventually it should
    handle all of discovery."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force)
    nodemap = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because that ends up doing a pathological
        # number of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        knowncommon = set(common)
        filteredrheads = []
        for node in rheads:
            if node not in nodemap:
                # not known locally at all: a genuinely unknown remote head
                filteredrheads.append(node)
            elif node not in knowncommon:
                # known locally (possibly hidden): treat as common
                common.append(node)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1154
1154
def _pullbundle2(pullop):
    """Pull data from the remote using a bundle2 stream.

    For now the supported payloads are changegroup, listkeys (phases and
    bookmarks) and obsolescence markers."""
    opts = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    opts['common'] = pullop.common
    opts['heads'] = pullop.heads or pullop.rheads
    opts['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        opts['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            opts['listkeys'].append('bookmarks')
    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    elif pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            opts['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, opts)
    bundle = pullop.remote.getbundle('pull', **opts)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        pullop.cgresult = changegroup.combineresults(
            [cg['return'] for cg in op.records['changegroup']])

    # apply phase data received via listkeys
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # grab bookmark data received via listkeys
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1212
1212
1213 def _pullbundle2extraprepare(pullop, kwargs):
1213 def _pullbundle2extraprepare(pullop, kwargs):
1214 """hook function so that extensions can extend the getbundle call"""
1214 """hook function so that extensions can extend the getbundle call"""
1215 pass
1215 pass
1216
1216
def _pullchangeset(pullop):
    """Pull changesets from the remote via unbundle into the local repo."""
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    # Open the transaction as late as possible so we don't create one for
    # nothing and don't break a future useful rollback call.
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    remote = pullop.remote
    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 remote.url())
1250
1250
def _pullphase(pullop):
    """Fetch phase data from the remote and apply it locally."""
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1257
1257
def _pullapplyphases(pullop, remotephases):
    """Apply phase movements derived from the observed remote state."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # the remote is phase-aware and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # the remote is old style or publishing: all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # advance only changesets that are not already public locally
    newpublic = [node for node in pheads if phase(unfi, rev(node)) > public]
    if newpublic:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, newpublic)

    # advance only changesets that are not already draft locally
    newdraft = [node for node in dheads if phase(unfi, rev(node)) > draft]
    if newdraft:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, newdraft)
1292
1292
def _pullbookmarks(pullop):
    """Update the local bookmarks from the fetched remote bookmark data."""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1304
1304
def _pullobsolete(pullop):
    """Pull obsolescence markers from the remote via pushkey.

    ``pullop.gettransaction`` returns the pull transaction, creating one
    lazily only when markers are actually received. The transaction (or
    None) is returned so the caller knows whether one was created.

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if not key.startswith('dump'):
                    continue
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1328
1328
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    blob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(blob)])
1335
1335
# Ordered list of bundle2 part-generation step names for getbundle;
# order matters.
getbundle2partsorder = []

# Maps a step name to the function implementing it.
#
# Kept public so extensions can wrap individual steps if necessary.
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for getbundle.

    The decorated function is recorded in the name -> function mapping.
    Its name is appended to the ordered step list, or inserted at position
    ``idx`` when given, so registration order matters.

    Only use this decorator for brand new steps; to wrap a step defined by
    another extension, modify ``getbundle2partsmapping`` directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1362
1362
1363 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1363 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1364 **kwargs):
1364 **kwargs):
1365 """return a full bundle (with potentially multiple kind of parts)
1365 """return a full bundle (with potentially multiple kind of parts)
1366
1366
1367 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1367 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1368 passed. For now, the bundle can contain only changegroup, but this will
1368 passed. For now, the bundle can contain only changegroup, but this will
1369 changes when more part type will be available for bundle2.
1369 changes when more part type will be available for bundle2.
1370
1370
1371 This is different from changegroup.getchangegroup that only returns an HG10
1371 This is different from changegroup.getchangegroup that only returns an HG10
1372 changegroup bundle. They may eventually get reunited in the future when we
1372 changegroup bundle. They may eventually get reunited in the future when we
1373 have a clearer idea of the API we what to query different data.
1373 have a clearer idea of the API we what to query different data.
1374
1374
1375 The implementation is at a very early stage and will get massive rework
1375 The implementation is at a very early stage and will get massive rework
1376 when the API of bundle is refined.
1376 when the API of bundle is refined.
1377 """
1377 """
1378 # bundle10 case
1378 # bundle10 case
1379 usebundle2 = False
1379 usebundle2 = False
1380 if bundlecaps is not None:
1380 if bundlecaps is not None:
1381 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1381 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1382 if not usebundle2:
1382 if not usebundle2:
1383 if bundlecaps and not kwargs.get('cg', True):
1383 if bundlecaps and not kwargs.get('cg', True):
1384 raise ValueError(_('request for bundle10 must include changegroup'))
1384 raise ValueError(_('request for bundle10 must include changegroup'))
1385
1385
1386 if kwargs:
1386 if kwargs:
1387 raise ValueError(_('unsupported getbundle arguments: %s')
1387 raise ValueError(_('unsupported getbundle arguments: %s')
1388 % ', '.join(sorted(kwargs.keys())))
1388 % ', '.join(sorted(kwargs.keys())))
1389 return changegroup.getchangegroup(repo, source, heads=heads,
1389 return changegroup.getchangegroup(repo, source, heads=heads,
1390 common=common, bundlecaps=bundlecaps)
1390 common=common, bundlecaps=bundlecaps)
1391
1391
1392 # bundle20 case
1392 # bundle20 case
1393 b2caps = {}
1393 b2caps = {}
1394 for bcaps in bundlecaps:
1394 for bcaps in bundlecaps:
1395 if bcaps.startswith('bundle2='):
1395 if bcaps.startswith('bundle2='):
1396 blob = urllib.unquote(bcaps[len('bundle2='):])
1396 blob = urllib.unquote(bcaps[len('bundle2='):])
1397 b2caps.update(bundle2.decodecaps(blob))
1397 b2caps.update(bundle2.decodecaps(blob))
1398 bundler = bundle2.bundle20(repo.ui, b2caps)
1398 bundler = bundle2.bundle20(repo.ui, b2caps)
1399
1399
1400 kwargs['heads'] = heads
1400 kwargs['heads'] = heads
1401 kwargs['common'] = common
1401 kwargs['common'] = common
1402
1402
1403 for name in getbundle2partsorder:
1403 for name in getbundle2partsorder:
1404 func = getbundle2partsmapping[name]
1404 func = getbundle2partsmapping[name]
1405 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1405 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1406 **kwargs)
1406 **kwargs)
1407
1407
1408 return util.chunkbuffer(bundler.getchunks())
1408 return util.chunkbuffer(bundler.getchunks())
1409
1409
1410 @getbundle2partsgenerator('changegroup')
1410 @getbundle2partsgenerator('changegroup')
1411 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1411 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1412 b2caps=None, heads=None, common=None, **kwargs):
1412 b2caps=None, heads=None, common=None, **kwargs):
1413 """add a changegroup part to the requested bundle"""
1413 """add a changegroup part to the requested bundle"""
1414 cg = None
1414 cg = None
1415 if kwargs.get('cg', True):
1415 if kwargs.get('cg', True):
1416 # build changegroup bundle here.
1416 # build changegroup bundle here.
1417 version = None
1417 version = None
1418 cgversions = b2caps.get('changegroup')
1418 cgversions = b2caps.get('changegroup')
1419 getcgkwargs = {}
1419 getcgkwargs = {}
1420 if cgversions: # 3.1 and 3.2 ship with an empty value
1420 if cgversions: # 3.1 and 3.2 ship with an empty value
1421 cgversions = [v for v in cgversions if v in changegroup.packermap]
1421 cgversions = [v for v in cgversions if v in changegroup.packermap]
1422 if not cgversions:
1422 if not cgversions:
1423 raise ValueError(_('no common changegroup version'))
1423 raise ValueError(_('no common changegroup version'))
1424 version = getcgkwargs['version'] = max(cgversions)
1424 version = getcgkwargs['version'] = max(cgversions)
1425 outgoing = changegroup.computeoutgoing(repo, heads, common)
1425 outgoing = changegroup.computeoutgoing(repo, heads, common)
1426 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1426 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1427 bundlecaps=bundlecaps,
1427 bundlecaps=bundlecaps,
1428 **getcgkwargs)
1428 **getcgkwargs)
1429
1429
1430 if cg:
1430 if cg:
1431 part = bundler.newpart('changegroup', data=cg)
1431 part = bundler.newpart('changegroup', data=cg)
1432 if version is not None:
1432 if version is not None:
1433 part.addparam('version', version)
1433 part.addparam('version', version)
1434 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1434 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1435
1435
1436 @getbundle2partsgenerator('listkeys')
1436 @getbundle2partsgenerator('listkeys')
1437 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1437 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1438 b2caps=None, **kwargs):
1438 b2caps=None, **kwargs):
1439 """add parts containing listkeys namespaces to the requested bundle"""
1439 """add parts containing listkeys namespaces to the requested bundle"""
1440 listkeys = kwargs.get('listkeys', ())
1440 listkeys = kwargs.get('listkeys', ())
1441 for namespace in listkeys:
1441 for namespace in listkeys:
1442 part = bundler.newpart('listkeys')
1442 part = bundler.newpart('listkeys')
1443 part.addparam('namespace', namespace)
1443 part.addparam('namespace', namespace)
1444 keys = repo.listkeys(namespace).items()
1444 keys = repo.listkeys(namespace).items()
1445 part.data = pushkey.encodekeys(keys)
1445 part.data = pushkey.encodekeys(keys)
1446
1446
1447 @getbundle2partsgenerator('obsmarkers')
1447 @getbundle2partsgenerator('obsmarkers')
1448 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1448 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1449 b2caps=None, heads=None, **kwargs):
1449 b2caps=None, heads=None, **kwargs):
1450 """add an obsolescence markers part to the requested bundle"""
1450 """add an obsolescence markers part to the requested bundle"""
1451 if kwargs.get('obsmarkers', False):
1451 if kwargs.get('obsmarkers', False):
1452 if heads is None:
1452 if heads is None:
1453 heads = repo.heads()
1453 heads = repo.heads()
1454 subset = [c.node() for c in repo.set('::%ln', heads)]
1454 subset = [c.node() for c in repo.set('::%ln', heads)]
1455 markers = repo.obsstore.relevantmarkers(subset)
1455 markers = repo.obsstore.relevantmarkers(subset)
1456 markers = sorted(markers)
1456 markers = sorted(markers)
1457 buildobsmarkerspart(bundler, markers)
1457 buildobsmarkerspart(bundler, markers)
1458
1458
1459 @getbundle2partsgenerator('hgtagsfnodes')
1459 @getbundle2partsgenerator('hgtagsfnodes')
1460 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1460 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1461 b2caps=None, heads=None, common=None,
1461 b2caps=None, heads=None, common=None,
1462 **kwargs):
1462 **kwargs):
1463 """Transfer the .hgtags filenodes mapping.
1463 """Transfer the .hgtags filenodes mapping.
1464
1464
1465 Only values for heads in this bundle will be transferred.
1465 Only values for heads in this bundle will be transferred.
1466
1466
1467 The part data consists of pairs of 20 byte changeset node and .hgtags
1467 The part data consists of pairs of 20 byte changeset node and .hgtags
1468 filenodes raw values.
1468 filenodes raw values.
1469 """
1469 """
1470 # Don't send unless:
1470 # Don't send unless:
1471 # - changeset are being exchanged,
1471 # - changeset are being exchanged,
1472 # - the client supports it.
1472 # - the client supports it.
1473 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1473 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1474 return
1474 return
1475
1475
1476 outgoing = changegroup.computeoutgoing(repo, heads, common)
1476 outgoing = changegroup.computeoutgoing(repo, heads, common)
1477
1477
1478 if not outgoing.missingheads:
1478 if not outgoing.missingheads:
1479 return
1479 return
1480
1480
1481 cache = tags.hgtagsfnodescache(repo.unfiltered())
1481 cache = tags.hgtagsfnodescache(repo.unfiltered())
1482 chunks = []
1482 chunks = []
1483
1483
1484 # .hgtags fnodes are only relevant for head changesets. While we could
1484 # .hgtags fnodes are only relevant for head changesets. While we could
1485 # transfer values for all known nodes, there will likely be little to
1485 # transfer values for all known nodes, there will likely be little to
1486 # no benefit.
1486 # no benefit.
1487 #
1487 #
1488 # We don't bother using a generator to produce output data because
1488 # We don't bother using a generator to produce output data because
1489 # a) we only have 40 bytes per head and even esoteric numbers of heads
1489 # a) we only have 40 bytes per head and even esoteric numbers of heads
1490 # consume little memory (1M heads is 40MB) b) we don't want to send the
1490 # consume little memory (1M heads is 40MB) b) we don't want to send the
1491 # part if we don't have entries and knowing if we have entries requires
1491 # part if we don't have entries and knowing if we have entries requires
1492 # cache lookups.
1492 # cache lookups.
1493 for node in outgoing.missingheads:
1493 for node in outgoing.missingheads:
1494 # Don't compute missing, as this may slow down serving.
1494 # Don't compute missing, as this may slow down serving.
1495 fnode = cache.getfnode(node, computemissing=False)
1495 fnode = cache.getfnode(node, computemissing=False)
1496 if fnode is not None:
1496 if fnode is not None:
1497 chunks.extend([node, fnode])
1497 chunks.extend([node, fnode])
1498
1498
1499 if chunks:
1499 if chunks:
1500 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1500 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1501
1501
1502 def check_heads(repo, their_heads, context):
1502 def check_heads(repo, their_heads, context):
1503 """check if the heads of a repo have been modified
1503 """check if the heads of a repo have been modified
1504
1504
1505 Used by peer for unbundling.
1505 Used by peer for unbundling.
1506 """
1506 """
1507 heads = repo.heads()
1507 heads = repo.heads()
1508 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1508 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1509 if not (their_heads == ['force'] or their_heads == heads or
1509 if not (their_heads == ['force'] or their_heads == heads or
1510 their_heads == ['hashed', heads_hash]):
1510 their_heads == ['hashed', heads_hash]):
1511 # someone else committed/pushed/unbundled while we
1511 # someone else committed/pushed/unbundled while we
1512 # were transferring data
1512 # were transferring data
1513 raise error.PushRaced('repository changed while %s - '
1513 raise error.PushRaced('repository changed while %s - '
1514 'please try again' % context)
1514 'please try again' % context)
1515
1515
1516 def unbundle(repo, cg, heads, source, url):
1516 def unbundle(repo, cg, heads, source, url):
1517 """Apply a bundle to a repo.
1517 """Apply a bundle to a repo.
1518
1518
1519 this function makes sure the repo is locked during the application and have
1519 this function makes sure the repo is locked during the application and have
1520 mechanism to check that no push race occurred between the creation of the
1520 mechanism to check that no push race occurred between the creation of the
1521 bundle and its application.
1521 bundle and its application.
1522
1522
1523 If the push was raced as PushRaced exception is raised."""
1523 If the push was raced as PushRaced exception is raised."""
1524 r = 0
1524 r = 0
1525 # need a transaction when processing a bundle2 stream
1525 # need a transaction when processing a bundle2 stream
1526 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1526 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1527 lockandtr = [None, None, None]
1527 lockandtr = [None, None, None]
1528 recordout = None
1528 recordout = None
1529 # quick fix for output mismatch with bundle2 in 3.4
1529 # quick fix for output mismatch with bundle2 in 3.4
1530 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1530 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1531 False)
1531 False)
1532 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1532 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1533 captureoutput = True
1533 captureoutput = True
1534 try:
1534 try:
1535 check_heads(repo, heads, 'uploading changes')
1535 check_heads(repo, heads, 'uploading changes')
1536 # push can proceed
1536 # push can proceed
1537 if util.safehasattr(cg, 'params'):
1537 if util.safehasattr(cg, 'params'):
1538 r = None
1538 r = None
1539 try:
1539 try:
1540 def gettransaction():
1540 def gettransaction():
1541 if not lockandtr[2]:
1541 if not lockandtr[2]:
1542 lockandtr[0] = repo.wlock()
1542 lockandtr[0] = repo.wlock()
1543 lockandtr[1] = repo.lock()
1543 lockandtr[1] = repo.lock()
1544 lockandtr[2] = repo.transaction(source)
1544 lockandtr[2] = repo.transaction(source)
1545 lockandtr[2].hookargs['source'] = source
1545 lockandtr[2].hookargs['source'] = source
1546 lockandtr[2].hookargs['url'] = url
1546 lockandtr[2].hookargs['url'] = url
1547 lockandtr[2].hookargs['bundle2'] = '1'
1547 lockandtr[2].hookargs['bundle2'] = '1'
1548 return lockandtr[2]
1548 return lockandtr[2]
1549
1549
1550 # Do greedy locking by default until we're satisfied with lazy
1550 # Do greedy locking by default until we're satisfied with lazy
1551 # locking.
1551 # locking.
1552 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1552 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1553 gettransaction()
1553 gettransaction()
1554
1554
1555 op = bundle2.bundleoperation(repo, gettransaction,
1555 op = bundle2.bundleoperation(repo, gettransaction,
1556 captureoutput=captureoutput)
1556 captureoutput=captureoutput)
1557 try:
1557 try:
1558 op = bundle2.processbundle(repo, cg, op=op)
1558 op = bundle2.processbundle(repo, cg, op=op)
1559 finally:
1559 finally:
1560 r = op.reply
1560 r = op.reply
1561 if captureoutput and r is not None:
1561 if captureoutput and r is not None:
1562 repo.ui.pushbuffer(error=True, subproc=True)
1562 repo.ui.pushbuffer(error=True, subproc=True)
1563 def recordout(output):
1563 def recordout(output):
1564 r.newpart('output', data=output, mandatory=False)
1564 r.newpart('output', data=output, mandatory=False)
1565 if lockandtr[2] is not None:
1565 if lockandtr[2] is not None:
1566 lockandtr[2].close()
1566 lockandtr[2].close()
1567 except BaseException as exc:
1567 except BaseException as exc:
1568 exc.duringunbundle2 = True
1568 exc.duringunbundle2 = True
1569 if captureoutput and r is not None:
1569 if captureoutput and r is not None:
1570 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1570 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1571 def recordout(output):
1571 def recordout(output):
1572 part = bundle2.bundlepart('output', data=output,
1572 part = bundle2.bundlepart('output', data=output,
1573 mandatory=False)
1573 mandatory=False)
1574 parts.append(part)
1574 parts.append(part)
1575 raise
1575 raise
1576 else:
1576 else:
1577 lockandtr[1] = repo.lock()
1577 lockandtr[1] = repo.lock()
1578 r = changegroup.addchangegroup(repo, cg, source, url)
1578 r = changegroup.addchangegroup(repo, cg, source, url)
1579 finally:
1579 finally:
1580 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1580 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1581 if recordout is not None:
1581 if recordout is not None:
1582 recordout(repo.ui.popbuffer())
1582 recordout(repo.ui.popbuffer())
1583 return r
1583 return r
1584
1584
1585 def _maybeapplyclonebundle(pullop):
1585 def _maybeapplyclonebundle(pullop):
1586 """Apply a clone bundle from a remote, if possible."""
1586 """Apply a clone bundle from a remote, if possible."""
1587
1587
1588 repo = pullop.repo
1588 repo = pullop.repo
1589 remote = pullop.remote
1589 remote = pullop.remote
1590
1590
1591 if not repo.ui.configbool('experimental', 'clonebundles', False):
1591 if not repo.ui.configbool('experimental', 'clonebundles', False):
1592 return
1592 return
1593
1593
1594 if pullop.heads:
1594 if pullop.heads:
1595 return
1595 return
1596
1596
1597 if not remote.capable('clonebundles'):
1597 if not remote.capable('clonebundles'):
1598 return
1598 return
1599
1599
1600 res = remote._call('clonebundles')
1600 res = remote._call('clonebundles')
1601 entries = parseclonebundlesmanifest(res)
1601 entries = parseclonebundlesmanifest(res)
1602
1602
1603 # TODO filter entries by supported features.
1603 # TODO filter entries by supported features.
1604 # TODO sort entries by user preferences.
1604 # TODO sort entries by user preferences.
1605
1605
1606 if not entries:
1606 if not entries:
1607 repo.ui.note(_('no clone bundles available on remote; '
1607 repo.ui.note(_('no clone bundles available on remote; '
1608 'falling back to regular clone\n'))
1608 'falling back to regular clone\n'))
1609 return
1609 return
1610
1610
1611 url = entries[0]['URL']
1611 url = entries[0]['URL']
1612 repo.ui.status(_('applying clone bundle from %s\n') % url)
1612 repo.ui.status(_('applying clone bundle from %s\n') % url)
1613 if trypullbundlefromurl(repo.ui, repo, url):
1613 if trypullbundlefromurl(repo.ui, repo, url):
1614 repo.ui.status(_('finished applying clone bundle\n'))
1614 repo.ui.status(_('finished applying clone bundle\n'))
1615 # Bundle failed.
1615 # Bundle failed.
1616 #
1616 #
1617 # We abort by default to avoid the thundering herd of
1617 # We abort by default to avoid the thundering herd of
1618 # clients flooding a server that was expecting expensive
1618 # clients flooding a server that was expecting expensive
1619 # clone load to be offloaded.
1619 # clone load to be offloaded.
1620 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1620 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1621 repo.ui.warn(_('falling back to normal clone\n'))
1621 repo.ui.warn(_('falling back to normal clone\n'))
1622 else:
1622 else:
1623 raise error.Abort(_('error applying bundle'),
1623 raise error.Abort(_('error applying bundle'),
1624 hint=_('consider contacting the server '
1624 hint=_('consider contacting the server '
1625 'operator if this error persists'))
1625 'operator if this error persists'))
1626
1626
1627 def parseclonebundlesmanifest(s):
1627 def parseclonebundlesmanifest(s):
1628 """Parses the raw text of a clone bundles manifest.
1628 """Parses the raw text of a clone bundles manifest.
1629
1629
1630 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1630 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1631 to the URL and other keys are the attributes for the entry.
1631 to the URL and other keys are the attributes for the entry.
1632 """
1632 """
1633 m = []
1633 m = []
1634 for line in s.splitlines():
1634 for line in s.splitlines():
1635 fields = line.split()
1635 fields = line.split()
1636 if not fields:
1636 if not fields:
1637 continue
1637 continue
1638 attrs = {'URL': fields[0]}
1638 attrs = {'URL': fields[0]}
1639 for rawattr in fields[1:]:
1639 for rawattr in fields[1:]:
1640 key, value = rawattr.split('=', 1)
1640 key, value = rawattr.split('=', 1)
1641 attrs[urllib.unquote(key)] = urllib.unquote(value)
1641 attrs[urllib.unquote(key)] = urllib.unquote(value)
1642
1642
1643 m.append(attrs)
1643 m.append(attrs)
1644
1644
1645 return m
1645 return m
1646
1646
1647 def trypullbundlefromurl(ui, repo, url):
1647 def trypullbundlefromurl(ui, repo, url):
1648 """Attempt to apply a bundle from a URL."""
1648 """Attempt to apply a bundle from a URL."""
1649 lock = repo.lock()
1649 lock = repo.lock()
1650 try:
1650 try:
1651 tr = repo.transaction('bundleurl')
1651 tr = repo.transaction('bundleurl')
1652 try:
1652 try:
1653 try:
1653 try:
1654 fh = urlmod.open(ui, url)
1654 fh = urlmod.open(ui, url)
1655 cg = readbundle(ui, fh, 'stream')
1655 cg = readbundle(ui, fh, 'stream')
1656
1657 if isinstance(cg, bundle2.unbundle20):
1658 bundle2.processbundle(repo, cg, lambda: tr)
1659 else:
1656 changegroup.addchangegroup(repo, cg, 'clonebundles', url)
1660 changegroup.addchangegroup(repo, cg, 'clonebundles', url)
1657 tr.close()
1661 tr.close()
1658 return True
1662 return True
1659 except urllib2.HTTPError as e:
1663 except urllib2.HTTPError as e:
1660 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1664 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1661 except urllib2.URLError as e:
1665 except urllib2.URLError as e:
1662 ui.warn(_('error fetching bundle: %s\n') % e.reason)
1666 ui.warn(_('error fetching bundle: %s\n') % e.reason)
1663
1667
1664 return False
1668 return False
1665 finally:
1669 finally:
1666 tr.release()
1670 tr.release()
1667 finally:
1671 finally:
1668 lock.release()
1672 lock.release()
@@ -1,143 +1,143 b''
1 Set up a server
1 Set up a server
2
2
3 $ hg init server
3 $ hg init server
4 $ cd server
4 $ cd server
5 $ cat >> .hg/hgrc << EOF
5 $ cat >> .hg/hgrc << EOF
6 > [extensions]
6 > [extensions]
7 > clonebundles =
7 > clonebundles =
8 > EOF
8 > EOF
9
9
10 $ touch foo
10 $ touch foo
11 $ hg -q commit -A -m 'add foo'
11 $ hg -q commit -A -m 'add foo'
12 $ touch bar
12 $ touch bar
13 $ hg -q commit -A -m 'add bar'
13 $ hg -q commit -A -m 'add bar'
14
14
15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
16 $ cat hg.pid >> $DAEMON_PIDS
16 $ cat hg.pid >> $DAEMON_PIDS
17 $ cd ..
17 $ cd ..
18
18
19 Feature disabled by default
19 Feature disabled by default
20 (client should not request manifest)
20 (client should not request manifest)
21
21
22 $ hg clone -U http://localhost:$HGPORT feature-disabled
22 $ hg clone -U http://localhost:$HGPORT feature-disabled
23 requesting all changes
23 requesting all changes
24 adding changesets
24 adding changesets
25 adding manifests
25 adding manifests
26 adding file changes
26 adding file changes
27 added 2 changesets with 2 changes to 2 files
27 added 2 changesets with 2 changes to 2 files
28
28
29 $ cat server/access.log
29 $ cat server/access.log
30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
34
34
35 $ cat >> $HGRCPATH << EOF
35 $ cat >> $HGRCPATH << EOF
36 > [experimental]
36 > [experimental]
37 > clonebundles = true
37 > clonebundles = true
38 > EOF
38 > EOF
39
39
40 Missing manifest should not result in server lookup
40 Missing manifest should not result in server lookup
41
41
42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
43 requesting all changes
43 requesting all changes
44 adding changesets
44 adding changesets
45 adding manifests
45 adding manifests
46 adding file changes
46 adding file changes
47 added 2 changesets with 2 changes to 2 files
47 added 2 changesets with 2 changes to 2 files
48
48
49 $ tail -4 server/access.log
49 $ tail -4 server/access.log
50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
54
54
55 Empty manifest file results in retrieval
55 Empty manifest file results in retrieval
56 (the extension only checks if the manifest file exists)
56 (the extension only checks if the manifest file exists)
57
57
58 $ touch server/.hg/clonebundles.manifest
58 $ touch server/.hg/clonebundles.manifest
59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
60 no clone bundles available on remote; falling back to regular clone
60 no clone bundles available on remote; falling back to regular clone
61 requesting all changes
61 requesting all changes
62 adding changesets
62 adding changesets
63 adding manifests
63 adding manifests
64 adding file changes
64 adding file changes
65 added 2 changesets with 2 changes to 2 files
65 added 2 changesets with 2 changes to 2 files
66
66
67 Manifest file with invalid URL aborts
67 Manifest file with invalid URL aborts
68
68
69 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
69 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
70 $ hg clone http://localhost:$HGPORT 404-url
70 $ hg clone http://localhost:$HGPORT 404-url
71 applying clone bundle from http://does.not.exist/bundle.hg
71 applying clone bundle from http://does.not.exist/bundle.hg
72 error fetching bundle: [Errno -2] Name or service not known
72 error fetching bundle: [Errno -2] Name or service not known
73 abort: error applying bundle
73 abort: error applying bundle
74 (consider contacting the server operator if this error persists)
74 (consider contacting the server operator if this error persists)
75 [255]
75 [255]
76
76
77 Server is not running aborts
77 Server is not running aborts
78
78
79 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
79 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
80 $ hg clone http://localhost:$HGPORT server-not-runner
80 $ hg clone http://localhost:$HGPORT server-not-runner
81 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
81 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
82 error fetching bundle: [Errno 111] Connection refused
82 error fetching bundle: [Errno 111] Connection refused
83 abort: error applying bundle
83 abort: error applying bundle
84 (consider contacting the server operator if this error persists)
84 (consider contacting the server operator if this error persists)
85 [255]
85 [255]
86
86
87 Server returns 404
87 Server returns 404
88
88
89 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
89 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
90 $ cat http.pid >> $DAEMON_PIDS
90 $ cat http.pid >> $DAEMON_PIDS
91 $ hg clone http://localhost:$HGPORT running-404
91 $ hg clone http://localhost:$HGPORT running-404
92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
93 HTTP error fetching bundle: HTTP Error 404: File not found
93 HTTP error fetching bundle: HTTP Error 404: File not found
94 abort: error applying bundle
94 abort: error applying bundle
95 (consider contacting the server operator if this error persists)
95 (consider contacting the server operator if this error persists)
96 [255]
96 [255]
97
97
98 We can override failure to fall back to regular clone
98 We can override failure to fall back to regular clone
99
99
100 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
100 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
101 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
101 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
102 HTTP error fetching bundle: HTTP Error 404: File not found
102 HTTP error fetching bundle: HTTP Error 404: File not found
103 falling back to normal clone
103 falling back to normal clone
104 requesting all changes
104 requesting all changes
105 adding changesets
105 adding changesets
106 adding manifests
106 adding manifests
107 adding file changes
107 adding file changes
108 added 2 changesets with 2 changes to 2 files
108 added 2 changesets with 2 changes to 2 files
109
109
110 Bundle with partial content works
110 Bundle with partial content works
111
111
112 $ hg -R server bundle --type gzip --base null -r 53245c60e682 partial.hg
112 $ hg -R server bundle --type gzip --base null -r 53245c60e682 partial.hg
113 1 changesets found
113 1 changesets found
114
114
115 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
115 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
116 $ hg clone -U http://localhost:$HGPORT partial-bundle
116 $ hg clone -U http://localhost:$HGPORT partial-bundle
117 applying clone bundle from http://localhost:$HGPORT1/partial.hg
117 applying clone bundle from http://localhost:$HGPORT1/partial.hg
118 adding changesets
118 adding changesets
119 adding manifests
119 adding manifests
120 adding file changes
120 adding file changes
121 added 1 changesets with 1 changes to 1 files
121 added 1 changesets with 1 changes to 1 files
122 finished applying clone bundle
122 finished applying clone bundle
123 searching for changes
123 searching for changes
124 adding changesets
124 adding changesets
125 adding manifests
125 adding manifests
126 adding file changes
126 adding file changes
127 added 1 changesets with 1 changes to 1 files
127 added 1 changesets with 1 changes to 1 files
128
128
129 Bundle with full content works
129 Bundle with full content works
130
130
131 $ hg -R server bundle --type gzip --base null -r tip full.hg
131 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
132 2 changesets found
132 2 changesets found
133
133
134 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
134 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
135 $ hg clone -U http://localhost:$HGPORT full-bundle
135 $ hg clone -U http://localhost:$HGPORT full-bundle
136 applying clone bundle from http://localhost:$HGPORT1/full.hg
136 applying clone bundle from http://localhost:$HGPORT1/full.hg
137 adding changesets
137 adding changesets
138 adding manifests
138 adding manifests
139 adding file changes
139 adding file changes
140 added 2 changesets with 2 changes to 2 files
140 added 2 changesets with 2 changes to 2 files
141 finished applying clone bundle
141 finished applying clone bundle
142 searching for changes
142 searching for changes
143 no changes found
143 no changes found
General Comments 0
You need to be logged in to leave comments. Login now