##// END OF EJS Templates
exchange: do not attempt clone bundle if local repo is non-empty (issue4932)
Gregory Szorc -
r26855:9350f00a stable
parent child Browse files
Show More
@@ -1,1853 +1,1857
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib, urllib2
10 import errno, urllib, urllib2
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14 import streamclone
14 import streamclone
15 import sslutil
15 import sslutil
16 import tags
16 import tags
17 import url as urlmod
17 import url as urlmod
18
18
# Maps bundle compression human names to internal representation.
# ``None`` means "no compression"; the two-letter codes are the internal
# bundle header compression identifiers.
_bundlespeccompressions = {'none': None,
                           'bzip2': 'BZ',
                           'gzip': 'GZ',
                          }

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         # 'bundle2' is a legacy alias for the same
                         # changegroup version as 'v2'.
                         'bundle2': '02', #legacy
                        }
31
31
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpretted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into (version, {k: v}); values are
        # URI-decoded.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Full "<compression>-<version>" form.
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in _bundlespeccompressions:
            compression = spec
            version = 'v1'
            # generaldelta repos need a changegroup format that can carry
            # the extra delta information.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version, params
142
142
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the header of a bundle stream and return a suitable unbundler.

    Reads the first four bytes from ``fh`` to determine the bundle flavor
    and returns a cg1 unpacker, a bundle2 unbundler, or a stream clone
    applier accordingly. ``fname`` is used for error messages only; when
    empty, the input is treated as an anonymous "stream" and a headerless
    changegroup is tolerated. ``vfs``, when given, is used to expand
    ``fname`` to a full path for error reporting.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        if header.startswith('\0') and not header.startswith('HG'):
            # Headerless changegroup on a stream: put the consumed bytes
            # back and pretend we saw an uncompressed HG10 bundle.
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'

    magic, version = header[:2], header[2:]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
170
170
def buildobsmarkerspart(bundler, markers):
    """Append an 'obsmarkers' part carrying ``markers`` to ``bundler``.

    Returns the new part, or ``None`` when ``markers`` is empty (no part is
    added in that case). Raises ValueError if the bundler does not advertise
    any obsmarker format we know how to encode.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
185
185
186 def _canusebundle2(op):
186 def _canusebundle2(op):
187 """return true if a pull/push can use bundle2
187 """return true if a pull/push can use bundle2
188
188
189 Feel free to nuke this function when we drop the experimental option"""
189 Feel free to nuke this function when we drop the experimental option"""
190 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
190 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
191 and op.remote.capable('bundle2'))
191 and op.remote.capable('bundle2'))
192
192
193
193
class pushoperation(object):
    """A object that represent a single push operation

    It purpose is to carry push related state and very common operation.

    A new should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup made it to the remote, so
        # the pushed heads are now common; otherwise fall back to the
        # pre-push common heads.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
296
296
# mapping of message used when pushing bookmark
# action -> (success message template, failure message template); each
# template takes the bookmark name as its single %s argument.
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
305
305
306
306
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the ``pushoperation`` object describing the push; the integer
    changegroup result is available as ``pushop.cgresult``:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    # For a local-filesystem peer, refuse early if the destination cannot
    # understand our repository requirements.
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # transaction collects any changes pushed back to us (bundle2
            # reply); released in the outer finally below.
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must hold the remote lock ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # the _push* helpers below are no-ops for steps already
            # performed through bundle2 (tracked via pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        # release order: transaction, then lock, then wlock (reverse of
        # acquisition)
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
391
391
392 # list of steps to perform discovery before push
392 # list of steps to perform discovery before push
393 pushdiscoveryorder = []
393 pushdiscoveryorder = []
394
394
395 # Mapping between step name and function
395 # Mapping between step name and function
396 #
396 #
397 # This exists to help extensions wrap steps if necessary
397 # This exists to help extensions wrap steps if necessary
398 pushdiscoverymapping = {}
398 pushdiscoverymapping = {}
399
399
400 def pushdiscovery(stepname):
400 def pushdiscovery(stepname):
401 """decorator for function performing discovery before push
401 """decorator for function performing discovery before push
402
402
403 The function is added to the step -> function mapping and appended to the
403 The function is added to the step -> function mapping and appended to the
404 list of steps. Beware that decorated function will be added in order (this
404 list of steps. Beware that decorated function will be added in order (this
405 may matter).
405 may matter).
406
406
407 You can only use this decorator for a new step, if you want to wrap a step
407 You can only use this decorator for a new step, if you want to wrap a step
408 from an extension, change the pushdiscovery dictionary directly."""
408 from an extension, change the pushdiscovery dictionary directly."""
409 def dec(func):
409 def dec(func):
410 assert stepname not in pushdiscoverymapping
410 assert stepname not in pushdiscoverymapping
411 pushdiscoverymapping[stepname] = func
411 pushdiscoverymapping[stepname] = func
412 pushdiscoveryorder.append(stepname)
412 pushdiscoveryorder.append(stepname)
413 return func
413 return func
414 return dec
414 return dec
415
415
416 def _pushdiscovery(pushop):
416 def _pushdiscovery(pushop):
417 """Run all discovery steps"""
417 """Run all discovery steps"""
418 for stepname in pushdiscoveryorder:
418 for stepname in pushdiscoveryorder:
419 step = pushdiscoverymapping[stepname]
419 step = pushdiscoverymapping[stepname]
420 step(pushop)
420 step(pushop)
421
421
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # what do we have in common with the remote, and what is it missing?
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
434
434
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing server: only public remote revs are of interest
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
483
483
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """find obsolescence markers relevant to the outgoing heads

    Populates ``pushop.outobsmarkers`` only when markers exchange is
    enabled, the local obsstore is non-empty, and the remote advertises
    the 'obsolete' pushkey namespace.
    """
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repo.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
494
494
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks, filling ``pushop.outbookmarks``

    Each entry appended is a ``(name, old_remote_id, new_id)`` triple;
    an empty old id means "create on remote", an empty new id means
    "delete on remote".  Sets ``pushop.bkresult = 2`` when an explicitly
    requested bookmark exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only consider bookmarks on the pushed subset of history
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
545
545
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before pushing

    Returns False (after reporting "no changes found") when there is
    nothing to push.  Otherwise, unless ``--force`` is in effect, aborts
    on obsolete/troubled outgoing heads and runs the new-head checks,
    then returns True.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

        # internal config: bookmarks.pushing
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
582
582
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps.  Beware that decorated functions will be added in order
    (this may matter).  ``idx`` optionally forces the step's position in
    ``b2partsgenorder`` instead of appending.

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def dec(func):
        # registering the same step twice is a programming error
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
609
609
610 def _pushb2ctxcheckheads(pushop, bundler):
610 def _pushb2ctxcheckheads(pushop, bundler):
611 """Generate race condition checking parts
611 """Generate race condition checking parts
612
612
613 Exists as an independent function to aid extensions
613 Exists as an independent function to aid extensions
614 """
614 """
615 if not pushop.force:
615 if not pushop.force:
616 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
616 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
617
617
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    Returns a reply handler (or None when there is nothing to push).
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version both sides support
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
659
659
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one pushkey part per head that must be turned public on the
    remote, registers per-part failure callbacks, and returns a reply
    handler that reports ignored or failed updates.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
700
700
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the outgoing bundle2

    Skipped when the step already ran or when no marker format version
    is common to both sides.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)
712
712
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one pushkey part per bookmark change recorded in
    ``pushop.outbookmarks`` and returns a reply handler that reports
    the outcome of each update/export/delete.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
764
764
765
765
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except bundle2.AbortFromPart as exc:
        pushop.ui.status(_('remote: %s\n') % exc)
        raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate to the part-specific failure callback registered earlier
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
811
811
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path: builds a changegroup and sends it through
    either the remote's ``unbundle`` or ``addchangegroup`` capability,
    storing the result in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
860
860
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Applies the remote's phase view to the local repo, then pushes the
    local phase changes back to the remote via pushkey when not already
    done through bundle2.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
916
916
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    When no transaction manager is available the repo is not locked, so
    no phase is changed; instead the user is informed about the moves
    that would have happened.
    """
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
933
933
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    repo = pushop.repo
    remote = pushop.remote
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
952
952
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # classify the change so the right message pair is used
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
974
974
class pulloperation(object):
    """An object representing a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new object should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is not None:
            # We pulled a specific subset: sync on this subset.
            return self.heads
        # We pulled everything possible: sync on everything common plus
        # every remote head not already known to be common.
        known = set(self.common)
        return list(self.common) + [n for n in self.rheads if n not in known]

    @util.propertycache
    def canusebundle2(self):
        # delegate to the module-level capability check
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1044
1044
class transactionmanager(object):
    """An object to manage the life cycle of a transaction.

    It creates the transaction on demand and calls the appropriate hooks
    when closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction; None until first requested
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr is None:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1074
1074
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **(opargs or {}))
    if pullop.remote.local():
        # pulling from a local repo: ensure we can actually read it
        unsupported = set(pullop.remote.requirements) - pullop.repo.supported
        if unsupported:
            raise error.Abort(_("required features are not"
                                " supported in the destination:"
                                " %s") % (', '.join(sorted(unsupported))))

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
1129
1129
# Ordered list of discovery step names to run before a pull.
pulldiscoveryorder = []

# Mapping between step name and function.
#
# This exists to help extensions wrap steps if necessary.
pulldiscoverymapping = {}
1137
1137
def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The function is added to the step -> function mapping and appended to
    the list of steps. Beware that decorated functions are added in order
    (this may matter).

    You can only use this decorator for a new step; to wrap a step from an
    extension, change the pulldiscovery dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1153
1153
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # steps run in registration order; extensions may have wrapped entries
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1159
1159
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data already known; nothing to do
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1173
1173
1174
1174
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(pullop.repo,
                                                         pullop.remote,
                                                         heads=pullop.heads,
                                                         force=pullop.force)
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on an
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not included in a remote head, we will not be able
        # to detect it.
        scommon = set(common)
        filteredrheads = []
        for node in rheads:
            if node in nm:
                if node not in scommon:
                    common.append(node)
            else:
                filteredrheads.append(node)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1212
1212
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    gbargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    gbargs['common'] = pullop.common
    gbargs['heads'] = pullop.heads or pullop.rheads
    gbargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        gbargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            gbargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        gbargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            gbargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, gbargs)
    bundle = pullop.remote.getbundle('pull', **gbargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1280
1280
1281 def _pullbundle2extraprepare(pullop, kwargs):
1281 def _pullbundle2extraprepare(pullop, kwargs):
1282 """hook function so that extensions can extend the getbundle call"""
1282 """hook function so that extensions can extend the getbundle call"""
1283 pass
1283 pass
1284
1284
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so that we do not
    # open a transaction for nothing, and so that a future useful rollback
    # call is not broken.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    repo = pullop.repo
    remote = pullop.remote
    if not pullop.fetch:
        repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(repo, 'pull', remote.url())
1317
1317
def _pullphase(pullop):
    """Fetch remote phase data and apply it locally."""
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1324
1324
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing: all common changesets should be seen
        # as public.
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [node for node in pheads if phase(unfi, rev(node)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [node for node in dheads if phase(unfi, rev(node)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1359
1359
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1371
1371
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if not obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        return tr
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' in remoteobs:
        # A transaction is only opened once we know there is marker data
        # to apply.
        tr = pullop.gettransaction()
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
        pullop.repo.invalidatevolatilesets()
    return tr
1395
1395
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # Advertise the bundle2 capability blob URL-quoted alongside the
    # HG20 marker.
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1402
1402
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def dec(func):
        # Each step may be registered at most once.
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        # Append by default; honor an explicit position when one is given.
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
1429
1429
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # The client signals bundle2 support by advertising an HG2x capability.
    usebundle2 = False
    if bundlecaps is not None:
        usebundle2 = any(cap.startswith('HG2') for cap in bundlecaps)

    if not usebundle2:
        # bundle10 case: only a bare changegroup can be transferred.
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case: decode the client's advertised bundle2 capabilities.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # Let every registered part generator contribute, in registration order.
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1476
1476
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # Negotiate the newest changegroup version both sides support.
            cgversions = [v for v in cgversions
                          if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        # Advisory changeset count lets clients show progress information.
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1502
1502
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # One 'listkeys' part per requested pushkey namespace.
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1513
1513
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # Only send markers relevant to the ancestors of the requested heads.
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    buildobsmarkerspart(bundler, markers)
1525
1525
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)
    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    chunks = []
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1568
1568
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    # 'force' skips the check; otherwise the peer must have seen either the
    # exact current heads or a matching hash of them.
    if (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1582
1582
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: lazily acquire locks/transaction via closure.
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer output so it can be echoed into the reply.
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # Preserve already-generated reply output for the client.
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # legacy bundle: only the store lock is needed.
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1651
1651
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('experimental', 'clonebundles', False):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config experimental.clonebundles=false"'))
1711
1715
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        # First field is the URL; the rest are key=value attributes.
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # An unparseable spec is kept verbatim; filtering
                    # happens later in filterclonebundleentries().
                    pass

        m.append(attrs)

    return m
1747
1751
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            # Drop entries whose bundle specification this client cannot
            # understand or apply.
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        # SNI-only servers can't be reached by clients lacking SNI support.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries
1780
1784
def sortclonebundleentries(ui, entries):
    """Sort manifest entries by the user's declared preferences.

    Preferences come from ``experimental.clonebundleprefers`` as a list of
    ``key=value`` strings; earlier preferences take priority. Without any
    preferences the manifest order is preserved.
    """
    # experimental config: experimental.clonebundleprefers
    prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    return sorted(entries, cmp=compareentry)
1825
1829
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied, False when the
    fetch failed (HTTP/URL errors are reported but not raised).
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # Dispatch on the concrete bundle flavor that was served.
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            tr.release()
    finally:
        lock.release()
@@ -1,456 +1,450
1 Set up a server
1 Set up a server
2
2
3 $ hg init server
3 $ hg init server
4 $ cd server
4 $ cd server
5 $ cat >> .hg/hgrc << EOF
5 $ cat >> .hg/hgrc << EOF
6 > [extensions]
6 > [extensions]
7 > clonebundles =
7 > clonebundles =
8 > EOF
8 > EOF
9
9
10 $ touch foo
10 $ touch foo
11 $ hg -q commit -A -m 'add foo'
11 $ hg -q commit -A -m 'add foo'
12 $ touch bar
12 $ touch bar
13 $ hg -q commit -A -m 'add bar'
13 $ hg -q commit -A -m 'add bar'
14
14
15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
16 $ cat hg.pid >> $DAEMON_PIDS
16 $ cat hg.pid >> $DAEMON_PIDS
17 $ cd ..
17 $ cd ..
18
18
19 Feature disabled by default
19 Feature disabled by default
20 (client should not request manifest)
20 (client should not request manifest)
21
21
22 $ hg clone -U http://localhost:$HGPORT feature-disabled
22 $ hg clone -U http://localhost:$HGPORT feature-disabled
23 requesting all changes
23 requesting all changes
24 adding changesets
24 adding changesets
25 adding manifests
25 adding manifests
26 adding file changes
26 adding file changes
27 added 2 changesets with 2 changes to 2 files
27 added 2 changesets with 2 changes to 2 files
28
28
29 $ cat server/access.log
29 $ cat server/access.log
30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
34
34
35 $ cat >> $HGRCPATH << EOF
35 $ cat >> $HGRCPATH << EOF
36 > [experimental]
36 > [experimental]
37 > clonebundles = true
37 > clonebundles = true
38 > EOF
38 > EOF
39
39
40 Missing manifest should not result in server lookup
40 Missing manifest should not result in server lookup
41
41
42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
43 requesting all changes
43 requesting all changes
44 adding changesets
44 adding changesets
45 adding manifests
45 adding manifests
46 adding file changes
46 adding file changes
47 added 2 changesets with 2 changes to 2 files
47 added 2 changesets with 2 changes to 2 files
48
48
49 $ tail -4 server/access.log
49 $ tail -4 server/access.log
50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
54
54
55 Empty manifest file results in retrieval
55 Empty manifest file results in retrieval
56 (the extension only checks if the manifest file exists)
56 (the extension only checks if the manifest file exists)
57
57
58 $ touch server/.hg/clonebundles.manifest
58 $ touch server/.hg/clonebundles.manifest
59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
60 no clone bundles available on remote; falling back to regular clone
60 no clone bundles available on remote; falling back to regular clone
61 requesting all changes
61 requesting all changes
62 adding changesets
62 adding changesets
63 adding manifests
63 adding manifests
64 adding file changes
64 adding file changes
65 added 2 changesets with 2 changes to 2 files
65 added 2 changesets with 2 changes to 2 files
66
66
67 Server advertises presence of feature to client requesting full clone
67 Server advertises presence of feature to client requesting full clone
68
68
69 $ hg --config experimental.clonebundles=false clone -U http://localhost:$HGPORT advertise-on-clone
69 $ hg --config experimental.clonebundles=false clone -U http://localhost:$HGPORT advertise-on-clone
70 requesting all changes
70 requesting all changes
71 remote: this server supports the experimental "clone bundles" feature that should enable faster and more reliable cloning
71 remote: this server supports the experimental "clone bundles" feature that should enable faster and more reliable cloning
72 remote: help test it by setting the "experimental.clonebundles" config flag to "true"
72 remote: help test it by setting the "experimental.clonebundles" config flag to "true"
73 adding changesets
73 adding changesets
74 adding manifests
74 adding manifests
75 adding file changes
75 adding file changes
76 added 2 changesets with 2 changes to 2 files
76 added 2 changesets with 2 changes to 2 files
77
77
78 Manifest file with invalid URL aborts
78 Manifest file with invalid URL aborts
79
79
80 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
80 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
81 $ hg clone http://localhost:$HGPORT 404-url
81 $ hg clone http://localhost:$HGPORT 404-url
82 applying clone bundle from http://does.not.exist/bundle.hg
82 applying clone bundle from http://does.not.exist/bundle.hg
83 error fetching bundle: * not known (glob)
83 error fetching bundle: * not known (glob)
84 abort: error applying bundle
84 abort: error applying bundle
85 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
85 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
86 [255]
86 [255]
87
87
88 Server is not running aborts
88 Server is not running aborts
89
89
90 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
90 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
91 $ hg clone http://localhost:$HGPORT server-not-runner
91 $ hg clone http://localhost:$HGPORT server-not-runner
92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
93 error fetching bundle: Connection refused
93 error fetching bundle: Connection refused
94 abort: error applying bundle
94 abort: error applying bundle
95 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
95 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
96 [255]
96 [255]
97
97
98 Server returns 404
98 Server returns 404
99
99
100 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
100 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
101 $ cat http.pid >> $DAEMON_PIDS
101 $ cat http.pid >> $DAEMON_PIDS
102 $ hg clone http://localhost:$HGPORT running-404
102 $ hg clone http://localhost:$HGPORT running-404
103 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
103 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
104 HTTP error fetching bundle: HTTP Error 404: File not found
104 HTTP error fetching bundle: HTTP Error 404: File not found
105 abort: error applying bundle
105 abort: error applying bundle
106 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
106 (if this error persists, consider contacting the server operator or disable clone bundles via "--config experimental.clonebundles=false")
107 [255]
107 [255]
108
108
109 We can override failure to fall back to regular clone
109 We can override failure to fall back to regular clone
110
110
111 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
111 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
112 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
112 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
113 HTTP error fetching bundle: HTTP Error 404: File not found
113 HTTP error fetching bundle: HTTP Error 404: File not found
114 falling back to normal clone
114 falling back to normal clone
115 requesting all changes
115 requesting all changes
116 adding changesets
116 adding changesets
117 adding manifests
117 adding manifests
118 adding file changes
118 adding file changes
119 added 2 changesets with 2 changes to 2 files
119 added 2 changesets with 2 changes to 2 files
120
120
121 Bundle with partial content works
121 Bundle with partial content works
122
122
123 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
123 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
124 1 changesets found
124 1 changesets found
125
125
126 We verify exact bundle content as an extra check against accidental future
126 We verify exact bundle content as an extra check against accidental future
127 changes. If this output changes, we could break old clients.
127 changes. If this output changes, we could break old clients.
128
128
129 $ f --size --hexdump partial.hg
129 $ f --size --hexdump partial.hg
130 partial.hg: size=208
130 partial.hg: size=208
131 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
131 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
132 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
132 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
133 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
133 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
134 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
134 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
135 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
135 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
136 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
136 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
137 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
137 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
138 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
138 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
139 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
139 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
140 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
140 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
141 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
141 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
142 00b0: 96 b0 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
142 00b0: 96 b0 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
143 00c0: 78 ed fc d5 76 f1 36 95 dc 05 07 00 ad 39 5e d3 |x...v.6......9^.|
143 00c0: 78 ed fc d5 76 f1 36 95 dc 05 07 00 ad 39 5e d3 |x...v.6......9^.|
144
144
145 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
145 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
146 $ hg clone -U http://localhost:$HGPORT partial-bundle
146 $ hg clone -U http://localhost:$HGPORT partial-bundle
147 applying clone bundle from http://localhost:$HGPORT1/partial.hg
147 applying clone bundle from http://localhost:$HGPORT1/partial.hg
148 adding changesets
148 adding changesets
149 adding manifests
149 adding manifests
150 adding file changes
150 adding file changes
151 added 1 changesets with 1 changes to 1 files
151 added 1 changesets with 1 changes to 1 files
152 finished applying clone bundle
152 finished applying clone bundle
153 searching for changes
153 searching for changes
154 adding changesets
154 adding changesets
155 adding manifests
155 adding manifests
156 adding file changes
156 adding file changes
157 added 1 changesets with 1 changes to 1 files
157 added 1 changesets with 1 changes to 1 files
158
158
159 Incremental pull doesn't fetch bundle
159 Incremental pull doesn't fetch bundle
160
160
161 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
161 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
162 adding changesets
162 adding changesets
163 adding manifests
163 adding manifests
164 adding file changes
164 adding file changes
165 added 1 changesets with 1 changes to 1 files
165 added 1 changesets with 1 changes to 1 files
166
166
167 $ cd partial-clone
167 $ cd partial-clone
168 $ hg pull
168 $ hg pull
169 pulling from http://localhost:$HGPORT/
169 pulling from http://localhost:$HGPORT/
170 applying clone bundle from http://localhost:$HGPORT1/partial.hg
171 adding changesets
172 adding manifests
173 adding file changes
174 added 0 changesets with 0 changes to 1 files
175 finished applying clone bundle
176 searching for changes
170 searching for changes
177 adding changesets
171 adding changesets
178 adding manifests
172 adding manifests
179 adding file changes
173 adding file changes
180 added 1 changesets with 1 changes to 1 files
174 added 1 changesets with 1 changes to 1 files
181 (run 'hg update' to get a working copy)
175 (run 'hg update' to get a working copy)
182 $ cd ..
176 $ cd ..
183
177
184 Bundle with full content works
178 Bundle with full content works
185
179
186 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
180 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
187 2 changesets found
181 2 changesets found
188
182
189 Again, we perform an extra check against bundle content changes. If this content
183 Again, we perform an extra check against bundle content changes. If this content
190 changes, clone bundles produced by new Mercurial versions may not be readable
184 changes, clone bundles produced by new Mercurial versions may not be readable
191 by old clients.
185 by old clients.
192
186
193 $ f --size --hexdump full.hg
187 $ f --size --hexdump full.hg
194 full.hg: size=408
188 full.hg: size=408
195 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
189 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
196 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 90 e5 76 f6 70 |ion=GZx.c``..v.p|
190 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 90 e5 76 f6 70 |ion=GZx.c``..v.p|
197 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 06 76 a6 b2 |.swu....`..F.v..|
191 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 06 76 a6 b2 |.swu....`..F.v..|
198 0030: d4 a2 e2 cc fc 3c 03 23 06 06 e6 7d 40 b1 4d c1 |.....<.#...}@.M.|
192 0030: d4 a2 e2 cc fc 3c 03 23 06 06 e6 7d 40 b1 4d c1 |.....<.#...}@.M.|
199 0040: 2a 31 09 cf 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 |*1...:R.........|
193 0040: 2a 31 09 cf 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 |*1...:R.........|
200 0050: 97 17 b2 c9 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 |...........%....|
194 0050: 97 17 b2 c9 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 |...........%....|
201 0060: a4 a4 1a 5b 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 |...[X..'..Y..Y..|
195 0060: a4 a4 1a 5b 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 |...[X..'..Y..Y..|
202 0070: a4 59 26 5a 18 9a 18 59 5a 26 1a 27 27 25 99 a6 |.Y&Z...YZ&.''%..|
196 0070: a4 59 26 5a 18 9a 18 59 5a 26 1a 27 27 25 99 a6 |.Y&Z...YZ&.''%..|
203 0080: 99 1a 70 95 a4 16 97 70 19 28 18 70 a5 e5 e7 73 |..p....p.(.p...s|
197 0080: 99 1a 70 95 a4 16 97 70 19 28 18 70 a5 e5 e7 73 |..p....p.(.p...s|
204 0090: 71 25 a6 a4 28 00 19 40 13 0e ac fa df ab ff 7b |q%..(..@.......{|
198 0090: 71 25 a6 a4 28 00 19 40 13 0e ac fa df ab ff 7b |q%..(..@.......{|
205 00a0: 3f fb 92 dc 8b 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 |?.....b......=ZD|
199 00a0: 3f fb 92 dc 8b 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 |?.....b......=ZD|
206 00b0: ac 2f b0 a9 c3 66 1e 54 b9 26 08 a7 1a 1b 1a a7 |./...f.T.&......|
200 00b0: ac 2f b0 a9 c3 66 1e 54 b9 26 08 a7 1a 1b 1a a7 |./...f.T.&......|
207 00c0: 25 1b 9a 1b 99 19 9a 5a 18 9b a6 18 19 00 dd 67 |%......Z.......g|
201 00c0: 25 1b 9a 1b 99 19 9a 5a 18 9b a6 18 19 00 dd 67 |%......Z.......g|
208 00d0: 61 61 98 06 f4 80 49 4a 8a 65 52 92 41 9a 81 81 |aa....IJ.eR.A...|
202 00d0: 61 61 98 06 f4 80 49 4a 8a 65 52 92 41 9a 81 81 |aa....IJ.eR.A...|
209 00e0: a5 11 17 50 31 30 58 19 cc 80 98 25 29 b1 08 c4 |...P10X....%)...|
203 00e0: a5 11 17 50 31 30 58 19 cc 80 98 25 29 b1 08 c4 |...P10X....%)...|
210 00f0: 37 07 79 19 88 d9 41 ee 07 8a 41 cd 5d 98 65 fb |7.y...A...A.].e.|
204 00f0: 37 07 79 19 88 d9 41 ee 07 8a 41 cd 5d 98 65 fb |7.y...A...A.].e.|
211 0100: e5 9e 45 bf 8d 7f 9f c6 97 9f 2b 44 34 67 d9 ec |..E.......+D4g..|
205 0100: e5 9e 45 bf 8d 7f 9f c6 97 9f 2b 44 34 67 d9 ec |..E.......+D4g..|
212 0110: 8e 0f a0 61 a8 eb 82 82 2e c9 c2 20 25 d5 34 c5 |...a....... %.4.|
206 0110: 8e 0f a0 61 a8 eb 82 82 2e c9 c2 20 25 d5 34 c5 |...a....... %.4.|
213 0120: d0 d8 c2 dc d4 c2 d4 c4 30 d9 34 cd c0 d4 c8 cc |........0.4.....|
207 0120: d0 d8 c2 dc d4 c2 d4 c4 30 d9 34 cd c0 d4 c8 cc |........0.4.....|
214 0130: 34 31 c5 d0 c4 24 31 c9 32 2d d1 c2 2c c5 30 25 |41...$1.2-..,.0%|
208 0130: 34 31 c5 d0 c4 24 31 c9 32 2d d1 c2 2c c5 30 25 |41...$1.2-..,.0%|
215 0140: 09 e4 ee 85 8f 85 ff 88 ab 89 36 c7 2a c4 47 34 |..........6.*.G4|
209 0140: 09 e4 ee 85 8f 85 ff 88 ab 89 36 c7 2a c4 47 34 |..........6.*.G4|
216 0150: fe f8 ec 7b 73 37 3f c3 24 62 1d 8d 4d 1d 9e 40 |...{s7?.$b..M..@|
210 0150: fe f8 ec 7b 73 37 3f c3 24 62 1d 8d 4d 1d 9e 40 |...{s7?.$b..M..@|
217 0160: 06 3b 10 14 36 a4 38 10 04 d8 21 01 5a b2 83 f7 |.;..6.8...!.Z...|
211 0160: 06 3b 10 14 36 a4 38 10 04 d8 21 01 5a b2 83 f7 |.;..6.8...!.Z...|
218 0170: e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a 78 ed fc d5 |.E..V....R..x...|
212 0170: e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a 78 ed fc d5 |.E..V....R..x...|
219 0180: 76 f1 36 25 81 49 c0 ad 30 c0 0e 49 8f 54 b7 9e |v.6%.I..0..I.T..|
213 0180: 76 f1 36 25 81 49 c0 ad 30 c0 0e 49 8f 54 b7 9e |v.6%.I..0..I.T..|
220 0190: d4 1c 09 00 bb 8d f0 bd |........|
214 0190: d4 1c 09 00 bb 8d f0 bd |........|
221
215
222 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
216 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
223 $ hg clone -U http://localhost:$HGPORT full-bundle
217 $ hg clone -U http://localhost:$HGPORT full-bundle
224 applying clone bundle from http://localhost:$HGPORT1/full.hg
218 applying clone bundle from http://localhost:$HGPORT1/full.hg
225 adding changesets
219 adding changesets
226 adding manifests
220 adding manifests
227 adding file changes
221 adding file changes
228 added 2 changesets with 2 changes to 2 files
222 added 2 changesets with 2 changes to 2 files
229 finished applying clone bundle
223 finished applying clone bundle
230 searching for changes
224 searching for changes
231 no changes found
225 no changes found
232
226
233 Entry with unknown BUNDLESPEC is filtered and not used
227 Entry with unknown BUNDLESPEC is filtered and not used
234
228
235 $ cat > server/.hg/clonebundles.manifest << EOF
229 $ cat > server/.hg/clonebundles.manifest << EOF
236 > http://bad.entry1 BUNDLESPEC=UNKNOWN
230 > http://bad.entry1 BUNDLESPEC=UNKNOWN
237 > http://bad.entry2 BUNDLESPEC=xz-v1
231 > http://bad.entry2 BUNDLESPEC=xz-v1
238 > http://bad.entry3 BUNDLESPEC=none-v100
232 > http://bad.entry3 BUNDLESPEC=none-v100
239 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
233 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
240 > EOF
234 > EOF
241
235
242 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
236 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
243 applying clone bundle from http://localhost:$HGPORT1/full.hg
237 applying clone bundle from http://localhost:$HGPORT1/full.hg
244 adding changesets
238 adding changesets
245 adding manifests
239 adding manifests
246 adding file changes
240 adding file changes
247 added 2 changesets with 2 changes to 2 files
241 added 2 changesets with 2 changes to 2 files
248 finished applying clone bundle
242 finished applying clone bundle
249 searching for changes
243 searching for changes
250 no changes found
244 no changes found
251
245
252 Automatic fallback when all entries are filtered
246 Automatic fallback when all entries are filtered
253
247
254 $ cat > server/.hg/clonebundles.manifest << EOF
248 $ cat > server/.hg/clonebundles.manifest << EOF
255 > http://bad.entry BUNDLESPEC=UNKNOWN
249 > http://bad.entry BUNDLESPEC=UNKNOWN
256 > EOF
250 > EOF
257
251
258 $ hg clone -U http://localhost:$HGPORT filter-all
252 $ hg clone -U http://localhost:$HGPORT filter-all
259 no compatible clone bundles available on server; falling back to regular clone
253 no compatible clone bundles available on server; falling back to regular clone
260 (you may want to report this to the server operator)
254 (you may want to report this to the server operator)
261 requesting all changes
255 requesting all changes
262 adding changesets
256 adding changesets
263 adding manifests
257 adding manifests
264 adding file changes
258 adding file changes
265 added 2 changesets with 2 changes to 2 files
259 added 2 changesets with 2 changes to 2 files
266
260
267 URLs requiring SNI are filtered in Python <2.7.9
261 URLs requiring SNI are filtered in Python <2.7.9
268
262
269 $ cp full.hg sni.hg
263 $ cp full.hg sni.hg
270 $ cat > server/.hg/clonebundles.manifest << EOF
264 $ cat > server/.hg/clonebundles.manifest << EOF
271 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
265 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
272 > http://localhost:$HGPORT1/full.hg
266 > http://localhost:$HGPORT1/full.hg
273 > EOF
267 > EOF
274
268
275 #if sslcontext
269 #if sslcontext
276 Python 2.7.9+ support SNI
270 Python 2.7.9+ support SNI
277
271
278 $ hg clone -U http://localhost:$HGPORT sni-supported
272 $ hg clone -U http://localhost:$HGPORT sni-supported
279 applying clone bundle from http://localhost:$HGPORT1/sni.hg
273 applying clone bundle from http://localhost:$HGPORT1/sni.hg
280 adding changesets
274 adding changesets
281 adding manifests
275 adding manifests
282 adding file changes
276 adding file changes
283 added 2 changesets with 2 changes to 2 files
277 added 2 changesets with 2 changes to 2 files
284 finished applying clone bundle
278 finished applying clone bundle
285 searching for changes
279 searching for changes
286 no changes found
280 no changes found
287 #else
281 #else
288 Python <2.7.9 will filter SNI URLs
282 Python <2.7.9 will filter SNI URLs
289
283
290 $ hg clone -U http://localhost:$HGPORT sni-unsupported
284 $ hg clone -U http://localhost:$HGPORT sni-unsupported
291 applying clone bundle from http://localhost:$HGPORT1/full.hg
285 applying clone bundle from http://localhost:$HGPORT1/full.hg
292 adding changesets
286 adding changesets
293 adding manifests
287 adding manifests
294 adding file changes
288 adding file changes
295 added 2 changesets with 2 changes to 2 files
289 added 2 changesets with 2 changes to 2 files
296 finished applying clone bundle
290 finished applying clone bundle
297 searching for changes
291 searching for changes
298 no changes found
292 no changes found
299 #endif
293 #endif
300
294
301 Stream clone bundles are supported
295 Stream clone bundles are supported
302
296
303 $ hg -R server debugcreatestreamclonebundle packed.hg
297 $ hg -R server debugcreatestreamclonebundle packed.hg
304 writing 613 bytes for 4 files
298 writing 613 bytes for 4 files
305 bundle requirements: revlogv1
299 bundle requirements: revlogv1
306
300
307 No bundle spec should work
301 No bundle spec should work
308
302
309 $ cat > server/.hg/clonebundles.manifest << EOF
303 $ cat > server/.hg/clonebundles.manifest << EOF
310 > http://localhost:$HGPORT1/packed.hg
304 > http://localhost:$HGPORT1/packed.hg
311 > EOF
305 > EOF
312
306
313 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
307 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
314 applying clone bundle from http://localhost:$HGPORT1/packed.hg
308 applying clone bundle from http://localhost:$HGPORT1/packed.hg
315 4 files to transfer, 613 bytes of data
309 4 files to transfer, 613 bytes of data
316 transferred 613 bytes in *.* seconds (*) (glob)
310 transferred 613 bytes in *.* seconds (*) (glob)
317 finished applying clone bundle
311 finished applying clone bundle
318 searching for changes
312 searching for changes
319 no changes found
313 no changes found
320
314
321 Bundle spec without parameters should work
315 Bundle spec without parameters should work
322
316
323 $ cat > server/.hg/clonebundles.manifest << EOF
317 $ cat > server/.hg/clonebundles.manifest << EOF
324 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
318 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
325 > EOF
319 > EOF
326
320
327 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
321 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
328 applying clone bundle from http://localhost:$HGPORT1/packed.hg
322 applying clone bundle from http://localhost:$HGPORT1/packed.hg
329 4 files to transfer, 613 bytes of data
323 4 files to transfer, 613 bytes of data
330 transferred 613 bytes in *.* seconds (*) (glob)
324 transferred 613 bytes in *.* seconds (*) (glob)
331 finished applying clone bundle
325 finished applying clone bundle
332 searching for changes
326 searching for changes
333 no changes found
327 no changes found
334
328
335 Bundle spec with format requirements should work
329 Bundle spec with format requirements should work
336
330
337 $ cat > server/.hg/clonebundles.manifest << EOF
331 $ cat > server/.hg/clonebundles.manifest << EOF
338 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
332 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
339 > EOF
333 > EOF
340
334
341 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
335 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
342 applying clone bundle from http://localhost:$HGPORT1/packed.hg
336 applying clone bundle from http://localhost:$HGPORT1/packed.hg
343 4 files to transfer, 613 bytes of data
337 4 files to transfer, 613 bytes of data
344 transferred 613 bytes in *.* seconds (*) (glob)
338 transferred 613 bytes in *.* seconds (*) (glob)
345 finished applying clone bundle
339 finished applying clone bundle
346 searching for changes
340 searching for changes
347 no changes found
341 no changes found
348
342
349 Stream bundle spec with unknown requirements should be filtered out
343 Stream bundle spec with unknown requirements should be filtered out
350
344
351 $ cat > server/.hg/clonebundles.manifest << EOF
345 $ cat > server/.hg/clonebundles.manifest << EOF
352 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
346 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
353 > EOF
347 > EOF
354
348
355 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
349 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
356 no compatible clone bundles available on server; falling back to regular clone
350 no compatible clone bundles available on server; falling back to regular clone
357 (you may want to report this to the server operator)
351 (you may want to report this to the server operator)
358 requesting all changes
352 requesting all changes
359 adding changesets
353 adding changesets
360 adding manifests
354 adding manifests
361 adding file changes
355 adding file changes
362 added 2 changesets with 2 changes to 2 files
356 added 2 changesets with 2 changes to 2 files
363
357
364 Set up manifest for testing preferences
358 Set up manifest for testing preferences
365 (Remember, the TYPE does not have to match reality - the URL is
359 (Remember, the TYPE does not have to match reality - the URL is
366 important)
360 important)
367
361
368 $ cp full.hg gz-a.hg
362 $ cp full.hg gz-a.hg
369 $ cp full.hg gz-b.hg
363 $ cp full.hg gz-b.hg
370 $ cp full.hg bz2-a.hg
364 $ cp full.hg bz2-a.hg
371 $ cp full.hg bz2-b.hg
365 $ cp full.hg bz2-b.hg
372 $ cat > server/.hg/clonebundles.manifest << EOF
366 $ cat > server/.hg/clonebundles.manifest << EOF
373 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
367 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
374 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
368 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
375 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
369 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
376 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
370 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
377 > EOF
371 > EOF
378
372
379 Preferring an undefined attribute will take first entry
373 Preferring an undefined attribute will take first entry
380
374
381 $ hg --config experimental.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
375 $ hg --config experimental.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
382 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
376 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
383 adding changesets
377 adding changesets
384 adding manifests
378 adding manifests
385 adding file changes
379 adding file changes
386 added 2 changesets with 2 changes to 2 files
380 added 2 changesets with 2 changes to 2 files
387 finished applying clone bundle
381 finished applying clone bundle
388 searching for changes
382 searching for changes
389 no changes found
383 no changes found
390
384
391 Preferring bz2 type will download first entry of that type
385 Preferring bz2 type will download first entry of that type
392
386
393 $ hg --config experimental.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
387 $ hg --config experimental.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
394 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
388 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
395 adding changesets
389 adding changesets
396 adding manifests
390 adding manifests
397 adding file changes
391 adding file changes
398 added 2 changesets with 2 changes to 2 files
392 added 2 changesets with 2 changes to 2 files
399 finished applying clone bundle
393 finished applying clone bundle
400 searching for changes
394 searching for changes
401 no changes found
395 no changes found
402
396
403 Preferring multiple values of an option works
397 Preferring multiple values of an option works
404
398
405 $ hg --config experimental.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
399 $ hg --config experimental.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
406 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
400 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
407 adding changesets
401 adding changesets
408 adding manifests
402 adding manifests
409 adding file changes
403 adding file changes
410 added 2 changesets with 2 changes to 2 files
404 added 2 changesets with 2 changes to 2 files
411 finished applying clone bundle
405 finished applying clone bundle
412 searching for changes
406 searching for changes
413 no changes found
407 no changes found
414
408
415 Sorting multiple values should get us back to original first entry
409 Sorting multiple values should get us back to original first entry
416
410
417 $ hg --config experimental.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
411 $ hg --config experimental.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
418 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
412 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
419 adding changesets
413 adding changesets
420 adding manifests
414 adding manifests
421 adding file changes
415 adding file changes
422 added 2 changesets with 2 changes to 2 files
416 added 2 changesets with 2 changes to 2 files
423 finished applying clone bundle
417 finished applying clone bundle
424 searching for changes
418 searching for changes
425 no changes found
419 no changes found
426
420
427 Preferring multiple attributes has correct order
421 Preferring multiple attributes has correct order
428
422
429 $ hg --config experimental.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
423 $ hg --config experimental.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
430 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
424 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
431 adding changesets
425 adding changesets
432 adding manifests
426 adding manifests
433 adding file changes
427 adding file changes
434 added 2 changesets with 2 changes to 2 files
428 added 2 changesets with 2 changes to 2 files
435 finished applying clone bundle
429 finished applying clone bundle
436 searching for changes
430 searching for changes
437 no changes found
431 no changes found
438
432
439 Test where attribute is missing from some entries
433 Test where attribute is missing from some entries
440
434
441 $ cat > server/.hg/clonebundles.manifest << EOF
435 $ cat > server/.hg/clonebundles.manifest << EOF
442 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
436 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
443 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
437 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
444 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
438 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
445 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
439 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
446 > EOF
440 > EOF
447
441
448 $ hg --config experimental.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
442 $ hg --config experimental.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
449 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
443 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
450 adding changesets
444 adding changesets
451 adding manifests
445 adding manifests
452 adding file changes
446 adding file changes
453 added 2 changesets with 2 changes to 2 files
447 added 2 changesets with 2 changes to 2 files
454 finished applying clone bundle
448 finished applying clone bundle
455 searching for changes
449 searching for changes
456 no changes found
450 no changes found
General Comments 0
You need to be logged in to leave comments. Login now