##// END OF EJS Templates
exchange: don't print error codes after clone bundle failure...
Gregory Szorc -
r26732:69ac9aeb default
parent child Browse files
Show More
@@ -1,1804 +1,1804 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib, urllib2
10 import errno, urllib, urllib2
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14 import streamclone
14 import streamclone
15 import sslutil
15 import sslutil
16 import tags
16 import tags
17 import url as urlmod
17 import url as urlmod
18
18
# Maps bundle compression human names to internal representation.
# ('none' maps to None, i.e. no compression at all.)
_bundlespeccompressions = {'none': None,
                           'bzip2': 'BZ',
                           'gzip': 'GZ',
                          }

# Maps bundle version human names to changegroup versions.
# 'bundle2' is kept as a legacy alias for the '02' changegroup format.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'bundle2': '02', #legacy
                        }
30
30
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Split a bundle specification string into (compression, version).

    A bundle specification has the well-defined form::

        <compression>-<type>

    where <compression> is one of the supported compression names and
    <type> is (currently) a version string.  The meaning of a given
    specification must not change over time, so that bundles produced by
    newer versions of Mercurial stay readable by older ones.

    When ``strict`` is True (the default) the compression prefix is
    mandatory.  Otherwise a bare compression name or a bare version is
    also accepted and the missing half is filled in with a default.

    When ``externalnames`` is False (the default) the returned pair uses
    the internal representations; otherwise the human-centric names are
    returned as-is.

    Raises ``InvalidBundleSpecification`` when the specification is not
    syntactically well formed, and ``UnsupportedBundleSpecification`` when
    the compression or bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Bare value: either just the compression or just the version.
        # Defaults are only assumed outside of strict mode.
        assert not strict

        if spec in _bundlespeccompressions:
            compression = spec
            # generaldelta repos need the v2 changegroup to round-trip.
            version = 'v2' if 'generaldelta' in repo.requirements else 'v1'
        elif spec in _bundlespeccgversions:
            compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    if externalnames:
        return compression, version
    return (_bundlespeccompressions[compression],
            _bundlespeccgversions[version])
100
100
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte magic of a bundle stream and return an unbundler.

    ``fname`` is used for error reporting only ("stream" when empty);
    ``vfs``, when given, is used to expand ``fname`` to a full path.
    Raises ``error.Abort`` for non-Mercurial data or unknown versions.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
    if header.startswith('\0') and not header.startswith('HG'):
        # Headerless changegroup: push the sniffed bytes back onto the
        # stream and treat it as an uncompressed HG10 bundle.
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # Compression algorithm follows the magic unless already known.
        return changegroup.cg1unpacker(
            fh, alg or changegroup.readexactly(fh, 2))
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
126
126
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    Returns the new part, or None when ``markers`` is empty (no part is
    created in that case).
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
141
141
142 def _canusebundle2(op):
142 def _canusebundle2(op):
143 """return true if a pull/push can use bundle2
143 """return true if a pull/push can use bundle2
144
144
145 Feel free to nuke this function when we drop the experimental option"""
145 Feel free to nuke this function when we drop the experimental option"""
146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
147 and op.remote.capable('bundle2'))
147 and op.remote.capable('bundle2'))
148
148
149
149
class pushoperation(object):
    """State holder for a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager (set in push() when the local repo is locked)
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded; otherwise
        # fall back to the heads computed for the failure case.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of (success, failure) messages used when pushing a bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
261
261
262
262
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()

    ``opargs`` is an optional dict of extra keyword arguments forwarded to
    the ``pushoperation`` constructor.
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    # A local destination must support all of the source's requirements.
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        # Only open a local transaction when we actually hold the lock.
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            # push pipeline: discovery, then data, then metadata.  Steps
            # already handled through bundle2 are skipped via
            # pushop.stepsdone by the later legacy steps.
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        # release in reverse order of acquisition
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
347
347
# list of steps to perform discovery before push
# (ordering is registration order -- see the pushdiscovery decorator)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
355
355
def pushdiscovery(stepname):
    """Decorator registering a pre-push discovery function as ``stepname``.

    The decorated function is recorded in the step -> function mapping and
    the step name is appended to the global ordering list, so functions run
    in registration order (this may matter).

    Use this only for brand new steps; to wrap an existing step from an
    extension, modify ``pushdiscoverymapping`` directly.
    """
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
371
371
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
377
377
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover which changesets need to be pushed to the remote"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
390
390
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # NOTE(review): pheads is unpacked but unused below -- only droots is
    # consumed in this function.
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing remote: only public heads are of interest
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        # nothing to push: success and failure cases are identical
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
439
439
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed heads"""
    # Nothing to do unless marker exchange is enabled, the local store has
    # markers, and the remote advertises obsolescence support.
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
450
450
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Figure out which bookmark updates must be sent to the remote.

    Fills ``pushop.outbookmarks`` with (name, old, new) tuples and sets
    ``pushop.bkresult`` to 2 when an explicitly requested bookmark is
    unknown on both sides.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    def requested(book):
        # pop ``book`` from the set of explicitly requested bookmarks,
        # reporting whether it was there
        if book in explicit:
            explicit.remove(book)
            return True
        return False

    # bookmarks that advanced locally
    for book, srcid, dstid in advsrc:
        requested(book)
        if not ancestors or repo[srcid].rev() in ancestors:
            pushop.outbookmarks.append((book, dstid, srcid))
    # bookmarks created locally
    for book, srcid, dstid in addsrc:
        requested(book)
        pushop.outbookmarks.append((book, '', srcid))
    # bookmarks whose remote value would be overwritten
    for book, srcid, dstid in advdst + diverge + differ:
        requested(book)
        pushop.outbookmarks.append((book, dstid, srcid))
    # bookmarks that only exist remotely: delete only when the user named
    # the bookmark explicitly (treat as "deleted locally")
    for book, srcid, dstid in adddst:
        if requested(book):
            pushop.outbookmarks.append((book, dstid, ''))
    # identical bookmarks shouldn't get reported
    for book, srcid, dstid in same:
        requested(book)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
501
501
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before they are sent.

    Returns False when there is nothing to push. Unless the push is
    forced, aborts when the push would propagate obsolete or troubled
    changesets, and runs the standard new-head checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # messages bound to short names to stay within the 80
            # character limit
            obsmsg = _("push includes obsolete changeset: %s!")
            troublemsgs = {
                "unstable": _("push includes unstable changeset: %s!"),
                "bumped": _("push includes bumped changeset: %s!"),
                "divergent": _("push includes divergent changeset: %s!"),
            }
            # If at least one changeset in missing is obsolete or
            # unstable, then at least one of the missing heads is too,
            # so checking heads only is enough.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(obsmsg % ctx)
                elif ctx.troubled():
                    raise error.Abort(troublemsgs[ctx.troubles()[0]] % ctx)

        # internal config: bookmarks.pushing
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
538
538
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The decorated function is recorded in the step -> function mapping and
    appended to the ordered list of steps, or inserted at position ``idx``
    when one is given. Beware that decoration order therefore matters.

    You can only use this decorator for new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly.
    """
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
565
565
566 def _pushb2ctxcheckheads(pushop, bundler):
566 def _pushb2ctxcheckheads(pushop, bundler):
567 """Generate race condition checking parts
567 """Generate race condition checking parts
568
568
569 Exists as an indepedent function to aid extensions
569 Exists as an indepedent function to aid extensions
570 """
570 """
571 if not pushop.force:
571 if not pushop.force:
572 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
572 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
573
573
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions: # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            # Use error.Abort (not a bare ValueError) so the user gets a
            # normal abort message instead of a traceback, consistent with
            # the error handling used throughout the push path.
            raise error.Abort(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
615
615
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one pushkey part per head that must be turned public and
    returns a reply handler that reports any head the server refused or
    ignored.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    partnodes = []

    def handlefailure(pushop, exc):
        # abort with the node matching the failed part
        targetid = int(exc.partid)
        for partid, node in partnodes:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        partnodes.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in partnodes:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored update of %s to public!\n')
                               % node)
            elif not int(results[0]['return']):
                pushop.ui.warn(_('updating %s to public failed!\n') % node)
    return handlereply
656
656
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part to the bundle when possible.

    Skips the step when the step already ran or when the remote shares no
    common obsmarkers format version with us.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
668
668
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one pushkey part per outgoing bookmark and returns a reply
    handler that reports the outcome of every update.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
720
720
721
721
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    # run every registered part generator, collecting reply handlers
    handlers = []
    for stepname in b2partsgenorder:
        generated = b2partsgenmapping[stepname](pushop, bundler)
        if callable(generated):
            handlers.append(generated)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        # dispatch the failure to the callback registered for that part,
        # re-raising when none was registered
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for handler in handlers:
        handler(op)
764
764
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) code path: builds a changegroup locally and
    applies it remotely via ``unbundle`` or ``addchangegroup``, storing
    the outcome in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    fastpathok = (pushop.revs is None
                  and not outgoing.excluded
                  and not pushop.repo.changelog.filteredrevs)
    if fastpathok:
        # push everything,
        # use the fast path, no race possible on push
        packer = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   packer,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
813
813
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
        return

    pheads, droots = phases.analyzeremotephases(pushop.repo, cheads,
                                                remotephases)
    ### Apply remote phase on local
    if remotephases.get('publishing', False):
        _localphasemove(pushop, cheads)
    else: # publish = False
        _localphasemove(pushop, pheads)
        _localphasemove(pushop, cheads, phases.draft)
    ### Apply local phase on remote

    if pushop.cgresult:
        if 'phases' in pushop.stepsdone:
            # phases already pushed though bundle2
            return
        outdated = pushop.outdatedphases
    else:
        outdated = pushop.fallbackoutdatedphases

    pushop.stepsdone.add('phases')

    # filter heads already turned public by the push
    outdated = [c for c in outdated if c.node() not in pheads]
    # fallback to independent pushkey command
    for newremotehead in outdated:
        ok = pushop.remote.pushkey('phases',
                                   newremotehead.hex(),
                                   str(phases.draft),
                                   str(phases.public))
        if not ok:
            pushop.ui.warn(_('updating %s to public failed!\n')
                           % newremotehead)
869
869
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
886
886
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        allok = True
        # reverse sort to ensure we end with dump0
        for key in sorted(remotedata, reverse=True):
            if not remote.pushkey('obsolete', key, '', remotedata[key]):
                allok = False
        if not allok:
            repo.ui.warn(_('failed to push some obsolete markers!\n'))
905
905
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
927
927
class pulloperation(object):
    """State holder for a single pull operation.

    Carries pull-related state and very common helper operations. A fresh
    instance should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # destination repo (pulled into) and source peer (pulled from)
        self.repo = repo
        self.remote = remote
        # revisions to pull; None means "everything"
        self.heads = heads
        # bookmarks explicitly requested by the user
        self.explicitbookmarks = bookmarks
        # whether the pull is forced
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager, installed later by pull()
        self.trmanager = None
        # changesets common to local and remote before the pull
        self.common = None
        # heads observed on the remote
        self.rheads = None
        # changesets missing locally that must be fetched
        self.fetch = None
        # remote bookmark data (may be pre-seeded by the caller)
        self.remotebookmarks = remotebookmarks
        # result of changegroup application (used as return code by pull)
        self.cgresult = None
        # names of steps already performed
        self.stepsdone = set()
        # whether a clone from pre-generated bundles was attempted
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is not None:
            # a specific subset was pulled: sync on that subset
            return self.heads
        # everything was pulled: sync on everything common, keeping the
        # common changesets first and appending any new remote heads
        subset = list(self.common)
        known = set(self.common)
        subset.extend(h for h in self.rheads if h not in known)
        return subset

    @util.propertycache
    def canusebundle2(self):
        # whether bundle2 can be used for this pull
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
997
997
class transactionmanager(object):
    """Manage the life cycle of a pull transaction.

    The underlying transaction is created lazily on first request, and the
    appropriate hook arguments are attached when it is opened."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # the transaction itself; stays None until someone asks for it
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1027
1027
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Raises ``error.Abort`` when pulling from a local repository whose
    requirements this repository does not support.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # abort early if the local destination lacks required features
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step below consults pullop.stepsdone, so work already done
        # through bundle2 is not repeated
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
1082
1082
# Ordered list of discovery step names to perform before pull (populated
# by the @pulldiscovery decorator below)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1090
1090
def pulldiscovery(stepname):
    """Decorator registering a discovery function to run before pull.

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the ordered step list, so registration order is
    significant.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, modify the pulldiscovery dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1106
1106
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1112
1112
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data was pre-seeded by the caller; nothing to fetch
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1126
1126
1127
1127
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # head is known locally (though possibly filtered): treat
                # it as common rather than as something to fetch
                if n not in scommon:
                    common.append(n)
            else:
                # genuinely unknown locally: keep it as a head to fetch
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head turned out to be known locally
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1165
1165
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # both sides share an obsmarker format: fetch markers in-bundle
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1233
1233
1234 def _pullbundle2extraprepare(pullop, kwargs):
1234 def _pullbundle2extraprepare(pullop, kwargs):
1235 """hook function so that extensions can extend the getbundle call"""
1235 """hook function so that extensions can extend the getbundle call"""
1236 pass
1236 pass
1237
1237
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # select the richest protocol command the remote supports, falling
    # back from getbundle to the legacy changegroup commands
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1270
1270
1271 def _pullphase(pullop):
1271 def _pullphase(pullop):
1272 # Get remote phases data from remote
1272 # Get remote phases data from remote
1273 if 'phases' in pullop.stepsdone:
1273 if 'phases' in pullop.stepsdone:
1274 return
1274 return
1275 remotephases = pullop.remote.listkeys('phases')
1275 remotephases = pullop.remote.listkeys('phases')
1276 _pullapplyphases(pullop, remotephases)
1276 _pullapplyphases(pullop, remotephases)
1277
1277
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1312
1312
1313 def _pullbookmarks(pullop):
1313 def _pullbookmarks(pullop):
1314 """process the remote bookmark information to update the local one"""
1314 """process the remote bookmark information to update the local one"""
1315 if 'bookmarks' in pullop.stepsdone:
1315 if 'bookmarks' in pullop.stepsdone:
1316 return
1316 return
1317 pullop.stepsdone.add('bookmarks')
1317 pullop.stepsdone.add('bookmarks')
1318 repo = pullop.repo
1318 repo = pullop.repo
1319 remotebookmarks = pullop.remotebookmarks
1319 remotebookmarks = pullop.remotebookmarks
1320 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1320 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1321 pullop.remote.url(),
1321 pullop.remote.url(),
1322 pullop.gettransaction,
1322 pullop.gettransaction,
1323 explicit=pullop.explicitbookmarks)
1323 explicit=pullop.explicitbookmarks)
1324
1324
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            # reverse sort so 'dump0' is merged last, mirroring the push side
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1348
1348
def caps20to10(repo):
    """Return the bundlecaps set advertising bundle20 support for getbundle."""
    blob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(blob)])
1355
1355
# List of names of steps to perform for a bundle2 for getbundle, order matters
# (populated by the @getbundle2partsgenerator decorator below).
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1363
1363
1364 def getbundle2partsgenerator(stepname, idx=None):
1364 def getbundle2partsgenerator(stepname, idx=None):
1365 """decorator for function generating bundle2 part for getbundle
1365 """decorator for function generating bundle2 part for getbundle
1366
1366
1367 The function is added to the step -> function mapping and appended to the
1367 The function is added to the step -> function mapping and appended to the
1368 list of steps. Beware that decorated functions will be added in order
1368 list of steps. Beware that decorated functions will be added in order
1369 (this may matter).
1369 (this may matter).
1370
1370
1371 You can only use this decorator for new steps, if you want to wrap a step
1371 You can only use this decorator for new steps, if you want to wrap a step
1372 from an extension, attack the getbundle2partsmapping dictionary directly."""
1372 from an extension, attack the getbundle2partsmapping dictionary directly."""
1373 def dec(func):
1373 def dec(func):
1374 assert stepname not in getbundle2partsmapping
1374 assert stepname not in getbundle2partsmapping
1375 getbundle2partsmapping[stepname] = func
1375 getbundle2partsmapping[stepname] = func
1376 if idx is None:
1376 if idx is None:
1377 getbundle2partsorder.append(stepname)
1377 getbundle2partsorder.append(stepname)
1378 else:
1378 else:
1379 getbundle2partsorder.insert(idx, stepname)
1379 getbundle2partsorder.insert(idx, stepname)
1380 return func
1380 return func
1381 return dec
1381 return dec
1382
1382
1383 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1383 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1384 **kwargs):
1384 **kwargs):
1385 """return a full bundle (with potentially multiple kind of parts)
1385 """return a full bundle (with potentially multiple kind of parts)
1386
1386
1387 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1387 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1388 passed. For now, the bundle can contain only changegroup, but this will
1388 passed. For now, the bundle can contain only changegroup, but this will
1389 changes when more part type will be available for bundle2.
1389 changes when more part type will be available for bundle2.
1390
1390
1391 This is different from changegroup.getchangegroup that only returns an HG10
1391 This is different from changegroup.getchangegroup that only returns an HG10
1392 changegroup bundle. They may eventually get reunited in the future when we
1392 changegroup bundle. They may eventually get reunited in the future when we
1393 have a clearer idea of the API we what to query different data.
1393 have a clearer idea of the API we what to query different data.
1394
1394
1395 The implementation is at a very early stage and will get massive rework
1395 The implementation is at a very early stage and will get massive rework
1396 when the API of bundle is refined.
1396 when the API of bundle is refined.
1397 """
1397 """
1398 # bundle10 case
1398 # bundle10 case
1399 usebundle2 = False
1399 usebundle2 = False
1400 if bundlecaps is not None:
1400 if bundlecaps is not None:
1401 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1401 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1402 if not usebundle2:
1402 if not usebundle2:
1403 if bundlecaps and not kwargs.get('cg', True):
1403 if bundlecaps and not kwargs.get('cg', True):
1404 raise ValueError(_('request for bundle10 must include changegroup'))
1404 raise ValueError(_('request for bundle10 must include changegroup'))
1405
1405
1406 if kwargs:
1406 if kwargs:
1407 raise ValueError(_('unsupported getbundle arguments: %s')
1407 raise ValueError(_('unsupported getbundle arguments: %s')
1408 % ', '.join(sorted(kwargs.keys())))
1408 % ', '.join(sorted(kwargs.keys())))
1409 return changegroup.getchangegroup(repo, source, heads=heads,
1409 return changegroup.getchangegroup(repo, source, heads=heads,
1410 common=common, bundlecaps=bundlecaps)
1410 common=common, bundlecaps=bundlecaps)
1411
1411
1412 # bundle20 case
1412 # bundle20 case
1413 b2caps = {}
1413 b2caps = {}
1414 for bcaps in bundlecaps:
1414 for bcaps in bundlecaps:
1415 if bcaps.startswith('bundle2='):
1415 if bcaps.startswith('bundle2='):
1416 blob = urllib.unquote(bcaps[len('bundle2='):])
1416 blob = urllib.unquote(bcaps[len('bundle2='):])
1417 b2caps.update(bundle2.decodecaps(blob))
1417 b2caps.update(bundle2.decodecaps(blob))
1418 bundler = bundle2.bundle20(repo.ui, b2caps)
1418 bundler = bundle2.bundle20(repo.ui, b2caps)
1419
1419
1420 kwargs['heads'] = heads
1420 kwargs['heads'] = heads
1421 kwargs['common'] = common
1421 kwargs['common'] = common
1422
1422
1423 for name in getbundle2partsorder:
1423 for name in getbundle2partsorder:
1424 func = getbundle2partsmapping[name]
1424 func = getbundle2partsmapping[name]
1425 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1425 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1426 **kwargs)
1426 **kwargs)
1427
1427
1428 return util.chunkbuffer(bundler.getchunks())
1428 return util.chunkbuffer(bundler.getchunks())
1429
1429
1430 @getbundle2partsgenerator('changegroup')
1430 @getbundle2partsgenerator('changegroup')
1431 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1431 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1432 b2caps=None, heads=None, common=None, **kwargs):
1432 b2caps=None, heads=None, common=None, **kwargs):
1433 """add a changegroup part to the requested bundle"""
1433 """add a changegroup part to the requested bundle"""
1434 cg = None
1434 cg = None
1435 if kwargs.get('cg', True):
1435 if kwargs.get('cg', True):
1436 # build changegroup bundle here.
1436 # build changegroup bundle here.
1437 version = None
1437 version = None
1438 cgversions = b2caps.get('changegroup')
1438 cgversions = b2caps.get('changegroup')
1439 getcgkwargs = {}
1439 getcgkwargs = {}
1440 if cgversions: # 3.1 and 3.2 ship with an empty value
1440 if cgversions: # 3.1 and 3.2 ship with an empty value
1441 cgversions = [v for v in cgversions if v in changegroup.packermap]
1441 cgversions = [v for v in cgversions if v in changegroup.packermap]
1442 if not cgversions:
1442 if not cgversions:
1443 raise ValueError(_('no common changegroup version'))
1443 raise ValueError(_('no common changegroup version'))
1444 version = getcgkwargs['version'] = max(cgversions)
1444 version = getcgkwargs['version'] = max(cgversions)
1445 outgoing = changegroup.computeoutgoing(repo, heads, common)
1445 outgoing = changegroup.computeoutgoing(repo, heads, common)
1446 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1446 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1447 bundlecaps=bundlecaps,
1447 bundlecaps=bundlecaps,
1448 **getcgkwargs)
1448 **getcgkwargs)
1449
1449
1450 if cg:
1450 if cg:
1451 part = bundler.newpart('changegroup', data=cg)
1451 part = bundler.newpart('changegroup', data=cg)
1452 if version is not None:
1452 if version is not None:
1453 part.addparam('version', version)
1453 part.addparam('version', version)
1454 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1454 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1455
1455
1456 @getbundle2partsgenerator('listkeys')
1456 @getbundle2partsgenerator('listkeys')
1457 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1457 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1458 b2caps=None, **kwargs):
1458 b2caps=None, **kwargs):
1459 """add parts containing listkeys namespaces to the requested bundle"""
1459 """add parts containing listkeys namespaces to the requested bundle"""
1460 listkeys = kwargs.get('listkeys', ())
1460 listkeys = kwargs.get('listkeys', ())
1461 for namespace in listkeys:
1461 for namespace in listkeys:
1462 part = bundler.newpart('listkeys')
1462 part = bundler.newpart('listkeys')
1463 part.addparam('namespace', namespace)
1463 part.addparam('namespace', namespace)
1464 keys = repo.listkeys(namespace).items()
1464 keys = repo.listkeys(namespace).items()
1465 part.data = pushkey.encodekeys(keys)
1465 part.data = pushkey.encodekeys(keys)
1466
1466
1467 @getbundle2partsgenerator('obsmarkers')
1467 @getbundle2partsgenerator('obsmarkers')
1468 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1468 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1469 b2caps=None, heads=None, **kwargs):
1469 b2caps=None, heads=None, **kwargs):
1470 """add an obsolescence markers part to the requested bundle"""
1470 """add an obsolescence markers part to the requested bundle"""
1471 if kwargs.get('obsmarkers', False):
1471 if kwargs.get('obsmarkers', False):
1472 if heads is None:
1472 if heads is None:
1473 heads = repo.heads()
1473 heads = repo.heads()
1474 subset = [c.node() for c in repo.set('::%ln', heads)]
1474 subset = [c.node() for c in repo.set('::%ln', heads)]
1475 markers = repo.obsstore.relevantmarkers(subset)
1475 markers = repo.obsstore.relevantmarkers(subset)
1476 markers = sorted(markers)
1476 markers = sorted(markers)
1477 buildobsmarkerspart(bundler, markers)
1477 buildobsmarkerspart(bundler, markers)
1478
1478
1479 @getbundle2partsgenerator('hgtagsfnodes')
1479 @getbundle2partsgenerator('hgtagsfnodes')
1480 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1480 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1481 b2caps=None, heads=None, common=None,
1481 b2caps=None, heads=None, common=None,
1482 **kwargs):
1482 **kwargs):
1483 """Transfer the .hgtags filenodes mapping.
1483 """Transfer the .hgtags filenodes mapping.
1484
1484
1485 Only values for heads in this bundle will be transferred.
1485 Only values for heads in this bundle will be transferred.
1486
1486
1487 The part data consists of pairs of 20 byte changeset node and .hgtags
1487 The part data consists of pairs of 20 byte changeset node and .hgtags
1488 filenodes raw values.
1488 filenodes raw values.
1489 """
1489 """
1490 # Don't send unless:
1490 # Don't send unless:
1491 # - changeset are being exchanged,
1491 # - changeset are being exchanged,
1492 # - the client supports it.
1492 # - the client supports it.
1493 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1493 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1494 return
1494 return
1495
1495
1496 outgoing = changegroup.computeoutgoing(repo, heads, common)
1496 outgoing = changegroup.computeoutgoing(repo, heads, common)
1497
1497
1498 if not outgoing.missingheads:
1498 if not outgoing.missingheads:
1499 return
1499 return
1500
1500
1501 cache = tags.hgtagsfnodescache(repo.unfiltered())
1501 cache = tags.hgtagsfnodescache(repo.unfiltered())
1502 chunks = []
1502 chunks = []
1503
1503
1504 # .hgtags fnodes are only relevant for head changesets. While we could
1504 # .hgtags fnodes are only relevant for head changesets. While we could
1505 # transfer values for all known nodes, there will likely be little to
1505 # transfer values for all known nodes, there will likely be little to
1506 # no benefit.
1506 # no benefit.
1507 #
1507 #
1508 # We don't bother using a generator to produce output data because
1508 # We don't bother using a generator to produce output data because
1509 # a) we only have 40 bytes per head and even esoteric numbers of heads
1509 # a) we only have 40 bytes per head and even esoteric numbers of heads
1510 # consume little memory (1M heads is 40MB) b) we don't want to send the
1510 # consume little memory (1M heads is 40MB) b) we don't want to send the
1511 # part if we don't have entries and knowing if we have entries requires
1511 # part if we don't have entries and knowing if we have entries requires
1512 # cache lookups.
1512 # cache lookups.
1513 for node in outgoing.missingheads:
1513 for node in outgoing.missingheads:
1514 # Don't compute missing, as this may slow down serving.
1514 # Don't compute missing, as this may slow down serving.
1515 fnode = cache.getfnode(node, computemissing=False)
1515 fnode = cache.getfnode(node, computemissing=False)
1516 if fnode is not None:
1516 if fnode is not None:
1517 chunks.extend([node, fnode])
1517 chunks.extend([node, fnode])
1518
1518
1519 if chunks:
1519 if chunks:
1520 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1520 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1521
1521
1522 def check_heads(repo, their_heads, context):
1522 def check_heads(repo, their_heads, context):
1523 """check if the heads of a repo have been modified
1523 """check if the heads of a repo have been modified
1524
1524
1525 Used by peer for unbundling.
1525 Used by peer for unbundling.
1526 """
1526 """
1527 heads = repo.heads()
1527 heads = repo.heads()
1528 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1528 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1529 if not (their_heads == ['force'] or their_heads == heads or
1529 if not (their_heads == ['force'] or their_heads == heads or
1530 their_heads == ['hashed', heads_hash]):
1530 their_heads == ['hashed', heads_hash]):
1531 # someone else committed/pushed/unbundled while we
1531 # someone else committed/pushed/unbundled while we
1532 # were transferring data
1532 # were transferring data
1533 raise error.PushRaced('repository changed while %s - '
1533 raise error.PushRaced('repository changed while %s - '
1534 'please try again' % context)
1534 'please try again' % context)
1535
1535
1536 def unbundle(repo, cg, heads, source, url):
1536 def unbundle(repo, cg, heads, source, url):
1537 """Apply a bundle to a repo.
1537 """Apply a bundle to a repo.
1538
1538
1539 this function makes sure the repo is locked during the application and have
1539 this function makes sure the repo is locked during the application and have
1540 mechanism to check that no push race occurred between the creation of the
1540 mechanism to check that no push race occurred between the creation of the
1541 bundle and its application.
1541 bundle and its application.
1542
1542
1543 If the push was raced as PushRaced exception is raised."""
1543 If the push was raced as PushRaced exception is raised."""
1544 r = 0
1544 r = 0
1545 # need a transaction when processing a bundle2 stream
1545 # need a transaction when processing a bundle2 stream
1546 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1546 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1547 lockandtr = [None, None, None]
1547 lockandtr = [None, None, None]
1548 recordout = None
1548 recordout = None
1549 # quick fix for output mismatch with bundle2 in 3.4
1549 # quick fix for output mismatch with bundle2 in 3.4
1550 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1550 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1551 False)
1551 False)
1552 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1552 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1553 captureoutput = True
1553 captureoutput = True
1554 try:
1554 try:
1555 check_heads(repo, heads, 'uploading changes')
1555 check_heads(repo, heads, 'uploading changes')
1556 # push can proceed
1556 # push can proceed
1557 if util.safehasattr(cg, 'params'):
1557 if util.safehasattr(cg, 'params'):
1558 r = None
1558 r = None
1559 try:
1559 try:
1560 def gettransaction():
1560 def gettransaction():
1561 if not lockandtr[2]:
1561 if not lockandtr[2]:
1562 lockandtr[0] = repo.wlock()
1562 lockandtr[0] = repo.wlock()
1563 lockandtr[1] = repo.lock()
1563 lockandtr[1] = repo.lock()
1564 lockandtr[2] = repo.transaction(source)
1564 lockandtr[2] = repo.transaction(source)
1565 lockandtr[2].hookargs['source'] = source
1565 lockandtr[2].hookargs['source'] = source
1566 lockandtr[2].hookargs['url'] = url
1566 lockandtr[2].hookargs['url'] = url
1567 lockandtr[2].hookargs['bundle2'] = '1'
1567 lockandtr[2].hookargs['bundle2'] = '1'
1568 return lockandtr[2]
1568 return lockandtr[2]
1569
1569
1570 # Do greedy locking by default until we're satisfied with lazy
1570 # Do greedy locking by default until we're satisfied with lazy
1571 # locking.
1571 # locking.
1572 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1572 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1573 gettransaction()
1573 gettransaction()
1574
1574
1575 op = bundle2.bundleoperation(repo, gettransaction,
1575 op = bundle2.bundleoperation(repo, gettransaction,
1576 captureoutput=captureoutput)
1576 captureoutput=captureoutput)
1577 try:
1577 try:
1578 op = bundle2.processbundle(repo, cg, op=op)
1578 op = bundle2.processbundle(repo, cg, op=op)
1579 finally:
1579 finally:
1580 r = op.reply
1580 r = op.reply
1581 if captureoutput and r is not None:
1581 if captureoutput and r is not None:
1582 repo.ui.pushbuffer(error=True, subproc=True)
1582 repo.ui.pushbuffer(error=True, subproc=True)
1583 def recordout(output):
1583 def recordout(output):
1584 r.newpart('output', data=output, mandatory=False)
1584 r.newpart('output', data=output, mandatory=False)
1585 if lockandtr[2] is not None:
1585 if lockandtr[2] is not None:
1586 lockandtr[2].close()
1586 lockandtr[2].close()
1587 except BaseException as exc:
1587 except BaseException as exc:
1588 exc.duringunbundle2 = True
1588 exc.duringunbundle2 = True
1589 if captureoutput and r is not None:
1589 if captureoutput and r is not None:
1590 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1590 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1591 def recordout(output):
1591 def recordout(output):
1592 part = bundle2.bundlepart('output', data=output,
1592 part = bundle2.bundlepart('output', data=output,
1593 mandatory=False)
1593 mandatory=False)
1594 parts.append(part)
1594 parts.append(part)
1595 raise
1595 raise
1596 else:
1596 else:
1597 lockandtr[1] = repo.lock()
1597 lockandtr[1] = repo.lock()
1598 r = cg.apply(repo, source, url)
1598 r = cg.apply(repo, source, url)
1599 finally:
1599 finally:
1600 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1600 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1601 if recordout is not None:
1601 if recordout is not None:
1602 recordout(repo.ui.popbuffer())
1602 recordout(repo.ui.popbuffer())
1603 return r
1603 return r
1604
1604
1605 def _maybeapplyclonebundle(pullop):
1605 def _maybeapplyclonebundle(pullop):
1606 """Apply a clone bundle from a remote, if possible."""
1606 """Apply a clone bundle from a remote, if possible."""
1607
1607
1608 repo = pullop.repo
1608 repo = pullop.repo
1609 remote = pullop.remote
1609 remote = pullop.remote
1610
1610
1611 if not repo.ui.configbool('experimental', 'clonebundles', False):
1611 if not repo.ui.configbool('experimental', 'clonebundles', False):
1612 return
1612 return
1613
1613
1614 if pullop.heads:
1614 if pullop.heads:
1615 return
1615 return
1616
1616
1617 if not remote.capable('clonebundles'):
1617 if not remote.capable('clonebundles'):
1618 return
1618 return
1619
1619
1620 res = remote._call('clonebundles')
1620 res = remote._call('clonebundles')
1621
1621
1622 # If we call the wire protocol command, that's good enough to record the
1622 # If we call the wire protocol command, that's good enough to record the
1623 # attempt.
1623 # attempt.
1624 pullop.clonebundleattempted = True
1624 pullop.clonebundleattempted = True
1625
1625
1626 entries = parseclonebundlesmanifest(repo, res)
1626 entries = parseclonebundlesmanifest(repo, res)
1627 if not entries:
1627 if not entries:
1628 repo.ui.note(_('no clone bundles available on remote; '
1628 repo.ui.note(_('no clone bundles available on remote; '
1629 'falling back to regular clone\n'))
1629 'falling back to regular clone\n'))
1630 return
1630 return
1631
1631
1632 entries = filterclonebundleentries(repo, entries)
1632 entries = filterclonebundleentries(repo, entries)
1633 if not entries:
1633 if not entries:
1634 # There is a thundering herd concern here. However, if a server
1634 # There is a thundering herd concern here. However, if a server
1635 # operator doesn't advertise bundles appropriate for its clients,
1635 # operator doesn't advertise bundles appropriate for its clients,
1636 # they deserve what's coming. Furthermore, from a client's
1636 # they deserve what's coming. Furthermore, from a client's
1637 # perspective, no automatic fallback would mean not being able to
1637 # perspective, no automatic fallback would mean not being able to
1638 # clone!
1638 # clone!
1639 repo.ui.warn(_('no compatible clone bundles available on server; '
1639 repo.ui.warn(_('no compatible clone bundles available on server; '
1640 'falling back to regular clone\n'))
1640 'falling back to regular clone\n'))
1641 repo.ui.warn(_('(you may want to report this to the server '
1641 repo.ui.warn(_('(you may want to report this to the server '
1642 'operator)\n'))
1642 'operator)\n'))
1643 return
1643 return
1644
1644
1645 entries = sortclonebundleentries(repo.ui, entries)
1645 entries = sortclonebundleentries(repo.ui, entries)
1646
1646
1647 url = entries[0]['URL']
1647 url = entries[0]['URL']
1648 repo.ui.status(_('applying clone bundle from %s\n') % url)
1648 repo.ui.status(_('applying clone bundle from %s\n') % url)
1649 if trypullbundlefromurl(repo.ui, repo, url):
1649 if trypullbundlefromurl(repo.ui, repo, url):
1650 repo.ui.status(_('finished applying clone bundle\n'))
1650 repo.ui.status(_('finished applying clone bundle\n'))
1651 # Bundle failed.
1651 # Bundle failed.
1652 #
1652 #
1653 # We abort by default to avoid the thundering herd of
1653 # We abort by default to avoid the thundering herd of
1654 # clients flooding a server that was expecting expensive
1654 # clients flooding a server that was expecting expensive
1655 # clone load to be offloaded.
1655 # clone load to be offloaded.
1656 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1656 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1657 repo.ui.warn(_('falling back to normal clone\n'))
1657 repo.ui.warn(_('falling back to normal clone\n'))
1658 else:
1658 else:
1659 raise error.Abort(_('error applying bundle'),
1659 raise error.Abort(_('error applying bundle'),
1660 hint=_('if this error persists, consider contacting '
1660 hint=_('if this error persists, consider contacting '
1661 'the server operator or disable clone '
1661 'the server operator or disable clone '
1662 'bundles via '
1662 'bundles via '
1663 '"--config experimental.clonebundles=false"'))
1663 '"--config experimental.clonebundles=false"'))
1664
1664
1665 def parseclonebundlesmanifest(repo, s):
1665 def parseclonebundlesmanifest(repo, s):
1666 """Parses the raw text of a clone bundles manifest.
1666 """Parses the raw text of a clone bundles manifest.
1667
1667
1668 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1668 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1669 to the URL and other keys are the attributes for the entry.
1669 to the URL and other keys are the attributes for the entry.
1670 """
1670 """
1671 m = []
1671 m = []
1672 for line in s.splitlines():
1672 for line in s.splitlines():
1673 fields = line.split()
1673 fields = line.split()
1674 if not fields:
1674 if not fields:
1675 continue
1675 continue
1676 attrs = {'URL': fields[0]}
1676 attrs = {'URL': fields[0]}
1677 for rawattr in fields[1:]:
1677 for rawattr in fields[1:]:
1678 key, value = rawattr.split('=', 1)
1678 key, value = rawattr.split('=', 1)
1679 key = urllib.unquote(key)
1679 key = urllib.unquote(key)
1680 value = urllib.unquote(value)
1680 value = urllib.unquote(value)
1681 attrs[key] = value
1681 attrs[key] = value
1682
1682
1683 # Parse BUNDLESPEC into components. This makes client-side
1683 # Parse BUNDLESPEC into components. This makes client-side
1684 # preferences easier to specify since you can prefer a single
1684 # preferences easier to specify since you can prefer a single
1685 # component of the BUNDLESPEC.
1685 # component of the BUNDLESPEC.
1686 if key == 'BUNDLESPEC':
1686 if key == 'BUNDLESPEC':
1687 try:
1687 try:
1688 comp, version = parsebundlespec(repo, value,
1688 comp, version = parsebundlespec(repo, value,
1689 externalnames=True)
1689 externalnames=True)
1690 attrs['COMPRESSION'] = comp
1690 attrs['COMPRESSION'] = comp
1691 attrs['VERSION'] = version
1691 attrs['VERSION'] = version
1692 except error.InvalidBundleSpecification:
1692 except error.InvalidBundleSpecification:
1693 pass
1693 pass
1694 except error.UnsupportedBundleSpecification:
1694 except error.UnsupportedBundleSpecification:
1695 pass
1695 pass
1696
1696
1697 m.append(attrs)
1697 m.append(attrs)
1698
1698
1699 return m
1699 return m
1700
1700
1701 def filterclonebundleentries(repo, entries):
1701 def filterclonebundleentries(repo, entries):
1702 """Remove incompatible clone bundle manifest entries.
1702 """Remove incompatible clone bundle manifest entries.
1703
1703
1704 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1704 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1705 and returns a new list consisting of only the entries that this client
1705 and returns a new list consisting of only the entries that this client
1706 should be able to apply.
1706 should be able to apply.
1707
1707
1708 There is no guarantee we'll be able to apply all returned entries because
1708 There is no guarantee we'll be able to apply all returned entries because
1709 the metadata we use to filter on may be missing or wrong.
1709 the metadata we use to filter on may be missing or wrong.
1710 """
1710 """
1711 newentries = []
1711 newentries = []
1712 for entry in entries:
1712 for entry in entries:
1713 spec = entry.get('BUNDLESPEC')
1713 spec = entry.get('BUNDLESPEC')
1714 if spec:
1714 if spec:
1715 try:
1715 try:
1716 parsebundlespec(repo, spec, strict=True)
1716 parsebundlespec(repo, spec, strict=True)
1717 except error.InvalidBundleSpecification as e:
1717 except error.InvalidBundleSpecification as e:
1718 repo.ui.debug(str(e) + '\n')
1718 repo.ui.debug(str(e) + '\n')
1719 continue
1719 continue
1720 except error.UnsupportedBundleSpecification as e:
1720 except error.UnsupportedBundleSpecification as e:
1721 repo.ui.debug('filtering %s because unsupported bundle '
1721 repo.ui.debug('filtering %s because unsupported bundle '
1722 'spec: %s\n' % (entry['URL'], str(e)))
1722 'spec: %s\n' % (entry['URL'], str(e)))
1723 continue
1723 continue
1724
1724
1725 if 'REQUIRESNI' in entry and not sslutil.hassni:
1725 if 'REQUIRESNI' in entry and not sslutil.hassni:
1726 repo.ui.debug('filtering %s because SNI not supported\n' %
1726 repo.ui.debug('filtering %s because SNI not supported\n' %
1727 entry['URL'])
1727 entry['URL'])
1728 continue
1728 continue
1729
1729
1730 newentries.append(entry)
1730 newentries.append(entry)
1731
1731
1732 return newentries
1732 return newentries
1733
1733
1734 def sortclonebundleentries(ui, entries):
1734 def sortclonebundleentries(ui, entries):
1735 # experimental config: experimental.clonebundleprefers
1735 # experimental config: experimental.clonebundleprefers
1736 prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
1736 prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
1737 if not prefers:
1737 if not prefers:
1738 return list(entries)
1738 return list(entries)
1739
1739
1740 prefers = [p.split('=', 1) for p in prefers]
1740 prefers = [p.split('=', 1) for p in prefers]
1741
1741
1742 # Our sort function.
1742 # Our sort function.
1743 def compareentry(a, b):
1743 def compareentry(a, b):
1744 for prefkey, prefvalue in prefers:
1744 for prefkey, prefvalue in prefers:
1745 avalue = a.get(prefkey)
1745 avalue = a.get(prefkey)
1746 bvalue = b.get(prefkey)
1746 bvalue = b.get(prefkey)
1747
1747
1748 # Special case for b missing attribute and a matches exactly.
1748 # Special case for b missing attribute and a matches exactly.
1749 if avalue is not None and bvalue is None and avalue == prefvalue:
1749 if avalue is not None and bvalue is None and avalue == prefvalue:
1750 return -1
1750 return -1
1751
1751
1752 # Special case for a missing attribute and b matches exactly.
1752 # Special case for a missing attribute and b matches exactly.
1753 if bvalue is not None and avalue is None and bvalue == prefvalue:
1753 if bvalue is not None and avalue is None and bvalue == prefvalue:
1754 return 1
1754 return 1
1755
1755
1756 # We can't compare unless attribute present on both.
1756 # We can't compare unless attribute present on both.
1757 if avalue is None or bvalue is None:
1757 if avalue is None or bvalue is None:
1758 continue
1758 continue
1759
1759
1760 # Same values should fall back to next attribute.
1760 # Same values should fall back to next attribute.
1761 if avalue == bvalue:
1761 if avalue == bvalue:
1762 continue
1762 continue
1763
1763
1764 # Exact matches come first.
1764 # Exact matches come first.
1765 if avalue == prefvalue:
1765 if avalue == prefvalue:
1766 return -1
1766 return -1
1767 if bvalue == prefvalue:
1767 if bvalue == prefvalue:
1768 return 1
1768 return 1
1769
1769
1770 # Fall back to next attribute.
1770 # Fall back to next attribute.
1771 continue
1771 continue
1772
1772
1773 # If we got here we couldn't sort by attributes and prefers. Fall
1773 # If we got here we couldn't sort by attributes and prefers. Fall
1774 # back to index order.
1774 # back to index order.
1775 return 0
1775 return 0
1776
1776
1777 return sorted(entries, cmp=compareentry)
1777 return sorted(entries, cmp=compareentry)
1778
1778
1779 def trypullbundlefromurl(ui, repo, url):
1779 def trypullbundlefromurl(ui, repo, url):
1780 """Attempt to apply a bundle from a URL."""
1780 """Attempt to apply a bundle from a URL."""
1781 lock = repo.lock()
1781 lock = repo.lock()
1782 try:
1782 try:
1783 tr = repo.transaction('bundleurl')
1783 tr = repo.transaction('bundleurl')
1784 try:
1784 try:
1785 try:
1785 try:
1786 fh = urlmod.open(ui, url)
1786 fh = urlmod.open(ui, url)
1787 cg = readbundle(ui, fh, 'stream')
1787 cg = readbundle(ui, fh, 'stream')
1788
1788
1789 if isinstance(cg, bundle2.unbundle20):
1789 if isinstance(cg, bundle2.unbundle20):
1790 bundle2.processbundle(repo, cg, lambda: tr)
1790 bundle2.processbundle(repo, cg, lambda: tr)
1791 else:
1791 else:
1792 cg.apply(repo, 'clonebundles', url)
1792 cg.apply(repo, 'clonebundles', url)
1793 tr.close()
1793 tr.close()
1794 return True
1794 return True
1795 except urllib2.HTTPError as e:
1795 except urllib2.HTTPError as e:
1796 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1796 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1797 except urllib2.URLError as e:
1797 except urllib2.URLError as e:
1798 ui.warn(_('error fetching bundle: %s\n') % e.reason)
1798 ui.warn(_('error fetching bundle: %s\n') % e.reason[1])
1799
1799
1800 return False
1800 return False
1801 finally:
1801 finally:
1802 tr.release()
1802 tr.release()
1803 finally:
1803 finally:
1804 lock.release()
1804 lock.release()
@@ -1,368 +1,368 b''
1 Set up a server
1 Set up a server
2
2
3 $ hg init server
3 $ hg init server
4 $ cd server
4 $ cd server
5 $ cat >> .hg/hgrc << EOF
5 $ cat >> .hg/hgrc << EOF
6 > [extensions]
6 > [extensions]
7 > clonebundles =
7 > clonebundles =
8 > EOF
8 > EOF
9
9
10 $ touch foo
10 $ touch foo
11 $ hg -q commit -A -m 'add foo'
11 $ hg -q commit -A -m 'add foo'
12 $ touch bar
12 $ touch bar
13 $ hg -q commit -A -m 'add bar'
13 $ hg -q commit -A -m 'add bar'
14
14
15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
16 $ cat hg.pid >> $DAEMON_PIDS
16 $ cat hg.pid >> $DAEMON_PIDS
17 $ cd ..
17 $ cd ..
18
18
19 Feature disabled by default
19 Feature disabled by default
20 (client should not request manifest)
20 (client should not request manifest)
21
21
22 $ hg clone -U http://localhost:$HGPORT feature-disabled
22 $ hg clone -U http://localhost:$HGPORT feature-disabled
23 requesting all changes
23 requesting all changes
24 adding changesets
24 adding changesets
25 adding manifests
25 adding manifests
26 adding file changes
26 adding file changes
27 added 2 changesets with 2 changes to 2 files
27 added 2 changesets with 2 changes to 2 files
28
28
29 $ cat server/access.log
29 $ cat server/access.log
30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
34
34
35 $ cat >> $HGRCPATH << EOF
35 $ cat >> $HGRCPATH << EOF
36 > [experimental]
36 > [experimental]
37 > clonebundles = true
37 > clonebundles = true
38 > EOF
38 > EOF
39
39
40 Missing manifest should not result in server lookup
40 Missing manifest should not result in server lookup
41
41
42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
43 requesting all changes
43 requesting all changes
44 adding changesets
44 adding changesets
45 adding manifests
45 adding manifests
46 adding file changes
46 adding file changes
47 added 2 changesets with 2 changes to 2 files
47 added 2 changesets with 2 changes to 2 files
48
48
49 $ tail -4 server/access.log
49 $ tail -4 server/access.log
50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
54
54
55 Empty manifest file results in retrieval
55 Empty manifest file results in retrieval
56 (the extension only checks if the manifest file exists)
56 (the extension only checks if the manifest file exists)
57
57
58 $ touch server/.hg/clonebundles.manifest
58 $ touch server/.hg/clonebundles.manifest
59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
60 no clone bundles available on remote; falling back to regular clone
60 no clone bundles available on remote; falling back to regular clone
61 requesting all changes
61 requesting all changes
62 adding changesets
62 adding changesets
63 adding manifests
63 adding manifests
64 adding file changes
64 adding file changes
65 added 2 changesets with 2 changes to 2 files
65 added 2 changesets with 2 changes to 2 files
66
66
67 Server advertises presence of feature to client requesting full clone
67 Server advertises presence of feature to client requesting full clone
68
68
69 $ hg --config experimental.clonebundles=false clone -U http://localhost:$HGPORT advertise-on-clone
69 $ hg --config experimental.clonebundles=false clone -U http://localhost:$HGPORT advertise-on-clone
70 requesting all changes
70 requesting all changes
71 remote: this server supports the experimental "clone bundles" feature that should enable faster and more reliable cloning
71 remote: this server supports the experimental "clone bundles" feature that should enable faster and more reliable cloning
72 remote: help test it by setting the "experimental.clonebundles" config flag to "true"
72 remote: help test it by setting the "experimental.clonebundles" config flag to "true"
73 adding changesets
73 adding changesets
74 adding manifests
74 adding manifests
75 adding file changes
75 adding file changes
76 added 2 changesets with 2 changes to 2 files
76 added 2 changesets with 2 changes to 2 files
77
77
78 Manifest file with invalid URL aborts
78 Manifest file with invalid URL aborts
79
79
80 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
80 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
81 $ hg clone http://localhost:$HGPORT 404-url
81 $ hg clone http://localhost:$HGPORT 404-url
82 applying clone bundle from http://does.not.exist/bundle.hg
82 applying clone bundle from http://does.not.exist/bundle.hg
83 error fetching bundle: [Errno -2] Name or service not known
83 error fetching bundle: * not known (glob)
84 abort: error applying bundle
84 abort: error applying bundle
85 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
85 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
86 [255]
86 [255]
87
87
88 Server is not running aborts
88 Server is not running aborts
89
89
90 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
90 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
91 $ hg clone http://localhost:$HGPORT server-not-runner
91 $ hg clone http://localhost:$HGPORT server-not-runner
92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
93 error fetching bundle: [Errno 111] Connection refused
93 error fetching bundle: Connection refused
94 abort: error applying bundle
94 abort: error applying bundle
95 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
95 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
96 [255]
96 [255]
97
97
98 Server returns 404
98 Server returns 404
99
99
100 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
100 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
101 $ cat http.pid >> $DAEMON_PIDS
101 $ cat http.pid >> $DAEMON_PIDS
102 $ hg clone http://localhost:$HGPORT running-404
102 $ hg clone http://localhost:$HGPORT running-404
103 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
103 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
104 HTTP error fetching bundle: HTTP Error 404: File not found
104 HTTP error fetching bundle: HTTP Error 404: File not found
105 abort: error applying bundle
105 abort: error applying bundle
106 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
106 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
107 [255]
107 [255]
108
108
109 We can override failure to fall back to regular clone
109 We can override failure to fall back to regular clone
110
110
111 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
111 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
112 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
112 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
113 HTTP error fetching bundle: HTTP Error 404: File not found
113 HTTP error fetching bundle: HTTP Error 404: File not found
114 falling back to normal clone
114 falling back to normal clone
115 requesting all changes
115 requesting all changes
116 adding changesets
116 adding changesets
117 adding manifests
117 adding manifests
118 adding file changes
118 adding file changes
119 added 2 changesets with 2 changes to 2 files
119 added 2 changesets with 2 changes to 2 files
120
120
121 Bundle with partial content works
121 Bundle with partial content works
122
122
123 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
123 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
124 1 changesets found
124 1 changesets found
125
125
126 We verify exact bundle content as an extra check against accidental future
126 We verify exact bundle content as an extra check against accidental future
127 changes. If this output changes, we could break old clients.
127 changes. If this output changes, we could break old clients.
128
128
129 $ f --size --hexdump partial.hg
129 $ f --size --hexdump partial.hg
130 partial.hg: size=208
130 partial.hg: size=208
131 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
131 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
132 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
132 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
133 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
133 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
134 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
134 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
135 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
135 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
136 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
136 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
137 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
137 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
138 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
138 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
139 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
139 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
140 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
140 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
141 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
141 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
142 00b0: 96 b0 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
142 00b0: 96 b0 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
143 00c0: 78 ed fc d5 76 f1 36 95 dc 05 07 00 ad 39 5e d3 |x...v.6......9^.|
143 00c0: 78 ed fc d5 76 f1 36 95 dc 05 07 00 ad 39 5e d3 |x...v.6......9^.|
144
144
145 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
145 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
146 $ hg clone -U http://localhost:$HGPORT partial-bundle
146 $ hg clone -U http://localhost:$HGPORT partial-bundle
147 applying clone bundle from http://localhost:$HGPORT1/partial.hg
147 applying clone bundle from http://localhost:$HGPORT1/partial.hg
148 adding changesets
148 adding changesets
149 adding manifests
149 adding manifests
150 adding file changes
150 adding file changes
151 added 1 changesets with 1 changes to 1 files
151 added 1 changesets with 1 changes to 1 files
152 finished applying clone bundle
152 finished applying clone bundle
153 searching for changes
153 searching for changes
154 adding changesets
154 adding changesets
155 adding manifests
155 adding manifests
156 adding file changes
156 adding file changes
157 added 1 changesets with 1 changes to 1 files
157 added 1 changesets with 1 changes to 1 files
158
158
159 Bundle with full content works
159 Bundle with full content works
160
160
161 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
161 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
162 2 changesets found
162 2 changesets found
163
163
164 Again, we perform an extra check against bundle content changes. If this content
164 Again, we perform an extra check against bundle content changes. If this content
165 changes, clone bundles produced by new Mercurial versions may not be readable
165 changes, clone bundles produced by new Mercurial versions may not be readable
166 by old clients.
166 by old clients.
167
167
168 $ f --size --hexdump full.hg
168 $ f --size --hexdump full.hg
169 full.hg: size=408
169 full.hg: size=408
170 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
170 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
171 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 90 e5 76 f6 70 |ion=GZx.c``..v.p|
171 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 90 e5 76 f6 70 |ion=GZx.c``..v.p|
172 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 06 76 a6 b2 |.swu....`..F.v..|
172 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 06 76 a6 b2 |.swu....`..F.v..|
173 0030: d4 a2 e2 cc fc 3c 03 23 06 06 e6 7d 40 b1 4d c1 |.....<.#...}@.M.|
173 0030: d4 a2 e2 cc fc 3c 03 23 06 06 e6 7d 40 b1 4d c1 |.....<.#...}@.M.|
174 0040: 2a 31 09 cf 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 |*1...:R.........|
174 0040: 2a 31 09 cf 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 |*1...:R.........|
175 0050: 97 17 b2 c9 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 |...........%....|
175 0050: 97 17 b2 c9 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 |...........%....|
176 0060: a4 a4 1a 5b 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 |...[X..'..Y..Y..|
176 0060: a4 a4 1a 5b 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 |...[X..'..Y..Y..|
177 0070: a4 59 26 5a 18 9a 18 59 5a 26 1a 27 27 25 99 a6 |.Y&Z...YZ&.''%..|
177 0070: a4 59 26 5a 18 9a 18 59 5a 26 1a 27 27 25 99 a6 |.Y&Z...YZ&.''%..|
178 0080: 99 1a 70 95 a4 16 97 70 19 28 18 70 a5 e5 e7 73 |..p....p.(.p...s|
178 0080: 99 1a 70 95 a4 16 97 70 19 28 18 70 a5 e5 e7 73 |..p....p.(.p...s|
179 0090: 71 25 a6 a4 28 00 19 40 13 0e ac fa df ab ff 7b |q%..(..@.......{|
179 0090: 71 25 a6 a4 28 00 19 40 13 0e ac fa df ab ff 7b |q%..(..@.......{|
180 00a0: 3f fb 92 dc 8b 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 |?.....b......=ZD|
180 00a0: 3f fb 92 dc 8b 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 |?.....b......=ZD|
181 00b0: ac 2f b0 a9 c3 66 1e 54 b9 26 08 a7 1a 1b 1a a7 |./...f.T.&......|
181 00b0: ac 2f b0 a9 c3 66 1e 54 b9 26 08 a7 1a 1b 1a a7 |./...f.T.&......|
182 00c0: 25 1b 9a 1b 99 19 9a 5a 18 9b a6 18 19 00 dd 67 |%......Z.......g|
182 00c0: 25 1b 9a 1b 99 19 9a 5a 18 9b a6 18 19 00 dd 67 |%......Z.......g|
183 00d0: 61 61 98 06 f4 80 49 4a 8a 65 52 92 41 9a 81 81 |aa....IJ.eR.A...|
183 00d0: 61 61 98 06 f4 80 49 4a 8a 65 52 92 41 9a 81 81 |aa....IJ.eR.A...|
184 00e0: a5 11 17 50 31 30 58 19 cc 80 98 25 29 b1 08 c4 |...P10X....%)...|
184 00e0: a5 11 17 50 31 30 58 19 cc 80 98 25 29 b1 08 c4 |...P10X....%)...|
185 00f0: 37 07 79 19 88 d9 41 ee 07 8a 41 cd 5d 98 65 fb |7.y...A...A.].e.|
185 00f0: 37 07 79 19 88 d9 41 ee 07 8a 41 cd 5d 98 65 fb |7.y...A...A.].e.|
186 0100: e5 9e 45 bf 8d 7f 9f c6 97 9f 2b 44 34 67 d9 ec |..E.......+D4g..|
186 0100: e5 9e 45 bf 8d 7f 9f c6 97 9f 2b 44 34 67 d9 ec |..E.......+D4g..|
187 0110: 8e 0f a0 61 a8 eb 82 82 2e c9 c2 20 25 d5 34 c5 |...a....... %.4.|
187 0110: 8e 0f a0 61 a8 eb 82 82 2e c9 c2 20 25 d5 34 c5 |...a....... %.4.|
188 0120: d0 d8 c2 dc d4 c2 d4 c4 30 d9 34 cd c0 d4 c8 cc |........0.4.....|
188 0120: d0 d8 c2 dc d4 c2 d4 c4 30 d9 34 cd c0 d4 c8 cc |........0.4.....|
189 0130: 34 31 c5 d0 c4 24 31 c9 32 2d d1 c2 2c c5 30 25 |41...$1.2-..,.0%|
189 0130: 34 31 c5 d0 c4 24 31 c9 32 2d d1 c2 2c c5 30 25 |41...$1.2-..,.0%|
190 0140: 09 e4 ee 85 8f 85 ff 88 ab 89 36 c7 2a c4 47 34 |..........6.*.G4|
190 0140: 09 e4 ee 85 8f 85 ff 88 ab 89 36 c7 2a c4 47 34 |..........6.*.G4|
191 0150: fe f8 ec 7b 73 37 3f c3 24 62 1d 8d 4d 1d 9e 40 |...{s7?.$b..M..@|
191 0150: fe f8 ec 7b 73 37 3f c3 24 62 1d 8d 4d 1d 9e 40 |...{s7?.$b..M..@|
192 0160: 06 3b 10 14 36 a4 38 10 04 d8 21 01 5a b2 83 f7 |.;..6.8...!.Z...|
192 0160: 06 3b 10 14 36 a4 38 10 04 d8 21 01 5a b2 83 f7 |.;..6.8...!.Z...|
193 0170: e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a 78 ed fc d5 |.E..V....R..x...|
193 0170: e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a 78 ed fc d5 |.E..V....R..x...|
194 0180: 76 f1 36 25 81 49 c0 ad 30 c0 0e 49 8f 54 b7 9e |v.6%.I..0..I.T..|
194 0180: 76 f1 36 25 81 49 c0 ad 30 c0 0e 49 8f 54 b7 9e |v.6%.I..0..I.T..|
195 0190: d4 1c 09 00 bb 8d f0 bd |........|
195 0190: d4 1c 09 00 bb 8d f0 bd |........|
196
196
197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
198 $ hg clone -U http://localhost:$HGPORT full-bundle
198 $ hg clone -U http://localhost:$HGPORT full-bundle
199 applying clone bundle from http://localhost:$HGPORT1/full.hg
199 applying clone bundle from http://localhost:$HGPORT1/full.hg
200 adding changesets
200 adding changesets
201 adding manifests
201 adding manifests
202 adding file changes
202 adding file changes
203 added 2 changesets with 2 changes to 2 files
203 added 2 changesets with 2 changes to 2 files
204 finished applying clone bundle
204 finished applying clone bundle
205 searching for changes
205 searching for changes
206 no changes found
206 no changes found
207
207
208 Entry with unknown BUNDLESPEC is filtered and not used
208 Entry with unknown BUNDLESPEC is filtered and not used
209
209
210 $ cat > server/.hg/clonebundles.manifest << EOF
210 $ cat > server/.hg/clonebundles.manifest << EOF
211 > http://bad.entry1 BUNDLESPEC=UNKNOWN
211 > http://bad.entry1 BUNDLESPEC=UNKNOWN
212 > http://bad.entry2 BUNDLESPEC=xz-v1
212 > http://bad.entry2 BUNDLESPEC=xz-v1
213 > http://bad.entry3 BUNDLESPEC=none-v100
213 > http://bad.entry3 BUNDLESPEC=none-v100
214 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
214 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
215 > EOF
215 > EOF
216
216
217 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
217 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
218 applying clone bundle from http://localhost:$HGPORT1/full.hg
218 applying clone bundle from http://localhost:$HGPORT1/full.hg
219 adding changesets
219 adding changesets
220 adding manifests
220 adding manifests
221 adding file changes
221 adding file changes
222 added 2 changesets with 2 changes to 2 files
222 added 2 changesets with 2 changes to 2 files
223 finished applying clone bundle
223 finished applying clone bundle
224 searching for changes
224 searching for changes
225 no changes found
225 no changes found
226
226
227 Automatic fallback when all entries are filtered
227 Automatic fallback when all entries are filtered
228
228
229 $ cat > server/.hg/clonebundles.manifest << EOF
229 $ cat > server/.hg/clonebundles.manifest << EOF
230 > http://bad.entry BUNDLESPEC=UNKNOWN
230 > http://bad.entry BUNDLESPEC=UNKNOWN
231 > EOF
231 > EOF
232
232
233 $ hg clone -U http://localhost:$HGPORT filter-all
233 $ hg clone -U http://localhost:$HGPORT filter-all
234 no compatible clone bundles available on server; falling back to regular clone
234 no compatible clone bundles available on server; falling back to regular clone
235 (you may want to report this to the server operator)
235 (you may want to report this to the server operator)
236 requesting all changes
236 requesting all changes
237 adding changesets
237 adding changesets
238 adding manifests
238 adding manifests
239 adding file changes
239 adding file changes
240 added 2 changesets with 2 changes to 2 files
240 added 2 changesets with 2 changes to 2 files
241
241
242 URLs requiring SNI are filtered in Python <2.7.9
242 URLs requiring SNI are filtered in Python <2.7.9
243
243
244 $ cp full.hg sni.hg
244 $ cp full.hg sni.hg
245 $ cat > server/.hg/clonebundles.manifest << EOF
245 $ cat > server/.hg/clonebundles.manifest << EOF
246 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
246 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
247 > http://localhost:$HGPORT1/full.hg
247 > http://localhost:$HGPORT1/full.hg
248 > EOF
248 > EOF
249
249
250 #if sslcontext
250 #if sslcontext
251 Python 2.7.9+ support SNI
251 Python 2.7.9+ support SNI
252
252
253 $ hg clone -U http://localhost:$HGPORT sni-supported
253 $ hg clone -U http://localhost:$HGPORT sni-supported
254 applying clone bundle from http://localhost:$HGPORT1/sni.hg
254 applying clone bundle from http://localhost:$HGPORT1/sni.hg
255 adding changesets
255 adding changesets
256 adding manifests
256 adding manifests
257 adding file changes
257 adding file changes
258 added 2 changesets with 2 changes to 2 files
258 added 2 changesets with 2 changes to 2 files
259 finished applying clone bundle
259 finished applying clone bundle
260 searching for changes
260 searching for changes
261 no changes found
261 no changes found
262 #else
262 #else
263 Python <2.7.9 will filter SNI URLs
263 Python <2.7.9 will filter SNI URLs
264
264
265 $ hg clone -U http://localhost:$HGPORT sni-unsupported
265 $ hg clone -U http://localhost:$HGPORT sni-unsupported
266 applying clone bundle from http://localhost:$HGPORT1/full.hg
266 applying clone bundle from http://localhost:$HGPORT1/full.hg
267 adding changesets
267 adding changesets
268 adding manifests
268 adding manifests
269 adding file changes
269 adding file changes
270 added 2 changesets with 2 changes to 2 files
270 added 2 changesets with 2 changes to 2 files
271 finished applying clone bundle
271 finished applying clone bundle
272 searching for changes
272 searching for changes
273 no changes found
273 no changes found
274 #endif
274 #endif
275
275
276 Set up manifest for testing preferences
276 Set up manifest for testing preferences
277 (Remember, the TYPE does not have to match reality - the URL is
277 (Remember, the TYPE does not have to match reality - the URL is
278 important)
278 important)
279
279
280 $ cp full.hg gz-a.hg
280 $ cp full.hg gz-a.hg
281 $ cp full.hg gz-b.hg
281 $ cp full.hg gz-b.hg
282 $ cp full.hg bz2-a.hg
282 $ cp full.hg bz2-a.hg
283 $ cp full.hg bz2-b.hg
283 $ cp full.hg bz2-b.hg
284 $ cat > server/.hg/clonebundles.manifest << EOF
284 $ cat > server/.hg/clonebundles.manifest << EOF
285 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
285 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
286 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
286 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
287 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
287 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
288 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
288 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
289 > EOF
289 > EOF
290
290
291 Preferring an undefined attribute will take first entry
291 Preferring an undefined attribute will take first entry
292
292
293 $ hg --config experimental.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
293 $ hg --config experimental.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
294 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
294 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
295 adding changesets
295 adding changesets
296 adding manifests
296 adding manifests
297 adding file changes
297 adding file changes
298 added 2 changesets with 2 changes to 2 files
298 added 2 changesets with 2 changes to 2 files
299 finished applying clone bundle
299 finished applying clone bundle
300 searching for changes
300 searching for changes
301 no changes found
301 no changes found
302
302
303 Preferring bz2 type will download first entry of that type
303 Preferring bz2 type will download first entry of that type
304
304
305 $ hg --config experimental.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
305 $ hg --config experimental.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
306 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
306 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
307 adding changesets
307 adding changesets
308 adding manifests
308 adding manifests
309 adding file changes
309 adding file changes
310 added 2 changesets with 2 changes to 2 files
310 added 2 changesets with 2 changes to 2 files
311 finished applying clone bundle
311 finished applying clone bundle
312 searching for changes
312 searching for changes
313 no changes found
313 no changes found
314
314
315 Preferring multiple values of an option works
315 Preferring multiple values of an option works
316
316
317 $ hg --config experimental.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
317 $ hg --config experimental.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
318 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
318 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
319 adding changesets
319 adding changesets
320 adding manifests
320 adding manifests
321 adding file changes
321 adding file changes
322 added 2 changesets with 2 changes to 2 files
322 added 2 changesets with 2 changes to 2 files
323 finished applying clone bundle
323 finished applying clone bundle
324 searching for changes
324 searching for changes
325 no changes found
325 no changes found
326
326
327 Sorting multiple values should get us back to original first entry
327 Sorting multiple values should get us back to original first entry
328
328
329 $ hg --config experimental.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
329 $ hg --config experimental.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
330 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
330 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
331 adding changesets
331 adding changesets
332 adding manifests
332 adding manifests
333 adding file changes
333 adding file changes
334 added 2 changesets with 2 changes to 2 files
334 added 2 changesets with 2 changes to 2 files
335 finished applying clone bundle
335 finished applying clone bundle
336 searching for changes
336 searching for changes
337 no changes found
337 no changes found
338
338
339 Preferring multiple attributes has correct order
339 Preferring multiple attributes has correct order
340
340
341 $ hg --config experimental.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
341 $ hg --config experimental.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
342 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
342 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
343 adding changesets
343 adding changesets
344 adding manifests
344 adding manifests
345 adding file changes
345 adding file changes
346 added 2 changesets with 2 changes to 2 files
346 added 2 changesets with 2 changes to 2 files
347 finished applying clone bundle
347 finished applying clone bundle
348 searching for changes
348 searching for changes
349 no changes found
349 no changes found
350
350
351 Test where attribute is missing from some entries
351 Test where attribute is missing from some entries
352
352
353 $ cat > server/.hg/clonebundles.manifest << EOF
353 $ cat > server/.hg/clonebundles.manifest << EOF
354 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
354 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
355 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
355 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
356 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
356 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
357 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
357 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
358 > EOF
358 > EOF
359
359
360 $ hg --config experimental.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
360 $ hg --config experimental.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
361 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
361 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
362 adding changesets
362 adding changesets
363 adding manifests
363 adding manifests
364 adding file changes
364 adding file changes
365 added 2 changesets with 2 changes to 2 files
365 added 2 changesets with 2 changes to 2 files
366 finished applying clone bundle
366 finished applying clone bundle
367 searching for changes
367 searching for changes
368 no changes found
368 no changes found
General Comments 0
You need to be logged in to leave comments. Login now