##// END OF EJS Templates
exchange: standalone function to determine if bundle2 is requested...
Gregory Szorc -
r27244:709977a4 default
parent child Browse files
Show More
@@ -1,1851 +1,1854 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib, urllib2
10 import errno, urllib, urllib2
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14 import streamclone
14 import streamclone
15 import sslutil
15 import sslutil
16 import tags
16 import tags
17 import url as urlmod
17 import url as urlmod
18
18
19 # Maps bundle compression human names to internal representation.
19 # Maps bundle compression human names to internal representation.
20 _bundlespeccompressions = {'none': None,
20 _bundlespeccompressions = {'none': None,
21 'bzip2': 'BZ',
21 'bzip2': 'BZ',
22 'gzip': 'GZ',
22 'gzip': 'GZ',
23 }
23 }
24
24
25 # Maps bundle version human names to changegroup versions.
25 # Maps bundle version human names to changegroup versions.
26 _bundlespeccgversions = {'v1': '01',
26 _bundlespeccgversions = {'v1': '01',
27 'v2': '02',
27 'v2': '02',
28 'packed1': 's1',
28 'packed1': 's1',
29 'bundle2': '02', #legacy
29 'bundle2': '02', #legacy
30 }
30 }
31
31
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into ('<version>', {k: v, ...}).
        # Keys and values are URI (percent) encoded on the wire.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-qualified form: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in _bundlespeccompressions:
            # Compression only: infer version from the repo's storage format.
            compression = spec
            version = 'v1'
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            # Version only: pick the conventional default compression
            # (packed1 streams are never recompressed).
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                  ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Translate human names to internal identifiers (e.g. 'gzip' -> 'GZ',
        # 'v2' -> '02') via the module-level lookup tables.
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version, params
142
142
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle data readable from ``fh``.

    ``fname`` is used for error reporting (``"stream"`` when absent); when a
    ``vfs`` is supplied it is used to resolve ``fname`` to a full path.
    Raises ``error.Abort`` for non-Mercurial data or unknown bundle versions.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A raw changegroup has no 'HG' magic; wrap the stream so it can be
        # consumed as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # Legacy bundle: a 2-byte compression tag follows the magic unless we
        # already forced 'UN' above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
170
170
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    # Negotiate a marker format both sides understand.
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
185
185
186 def _canusebundle2(op):
186 def _canusebundle2(op):
187 """return true if a pull/push can use bundle2
187 """return true if a pull/push can use bundle2
188
188
189 Feel free to nuke this function when we drop the experimental option"""
189 Feel free to nuke this function when we drop the experimental option"""
190 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
190 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
191 and op.remote.capable('bundle2'))
191 and op.remote.capable('bundle2'))
192
192
193
193
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager (set by push() when the local repo is locked)
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult is truthy only when the changegroup push succeeded.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # (success message, failure message) pairs keyed by bookmark action
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
305
305
306
306
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    # Refuse early if the destination is a local repo that lacks features
    # our repo requires.
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # Wrap any bundle2 reply processing in a transaction so a failed
            # push-response can be rolled back atomically.
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # Old-style push: we must lock the remote ourselves.
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                # bundle2 records completed steps in pushop.stepsdone so the
                # legacy steps below become no-ops for them.
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # Release in reverse order of acquisition: transaction, then lock,
        # then wlock.
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
391
391
# list of steps to perform discovery before push
# (populated by the @pushdiscovery decorator, in registration order)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
399
399
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # each step name may only ever be registered once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
415
415
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    # execute every registered step in registration order
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
421
421
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # figure out what the remote already has...
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # ...then compute what we must send, limited to the requested revs
    outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
434
434
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    # Split remote phase roots into publicly-known heads and draft roots.
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # For a non-publishing remote, only changesets already public locally
    # need a phase update on the remote side.
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to sync if the changeset push succeeds / fails, respectively
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
483
483
484 @pushdiscovery('obsmarker')
484 @pushdiscovery('obsmarker')
485 def _pushdiscoveryobsmarkers(pushop):
485 def _pushdiscoveryobsmarkers(pushop):
486 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
486 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
487 and pushop.repo.obsstore
487 and pushop.repo.obsstore
488 and 'obsolete' in pushop.remote.listkeys('namespaces')):
488 and 'obsolete' in pushop.remote.listkeys('namespaces')):
489 repo = pushop.repo
489 repo = pushop.repo
490 # very naive computation, that can be quite expensive on big repo.
490 # very naive computation, that can be quite expensive on big repo.
491 # However: evolution is currently slow on them anyway.
491 # However: evolution is currently slow on them anyway.
492 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
492 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
493 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
493 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
494
494
495 @pushdiscovery('bookmarks')
495 @pushdiscovery('bookmarks')
496 def _pushdiscoverybookmarks(pushop):
496 def _pushdiscoverybookmarks(pushop):
497 ui = pushop.ui
497 ui = pushop.ui
498 repo = pushop.repo.unfiltered()
498 repo = pushop.repo.unfiltered()
499 remote = pushop.remote
499 remote = pushop.remote
500 ui.debug("checking for updated bookmarks\n")
500 ui.debug("checking for updated bookmarks\n")
501 ancestors = ()
501 ancestors = ()
502 if pushop.revs:
502 if pushop.revs:
503 revnums = map(repo.changelog.rev, pushop.revs)
503 revnums = map(repo.changelog.rev, pushop.revs)
504 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
504 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
505 remotebookmark = remote.listkeys('bookmarks')
505 remotebookmark = remote.listkeys('bookmarks')
506
506
507 explicit = set(pushop.bookmarks)
507 explicit = set(pushop.bookmarks)
508
508
509 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
509 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
510 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
510 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
511 for b, scid, dcid in advsrc:
511 for b, scid, dcid in advsrc:
512 if b in explicit:
512 if b in explicit:
513 explicit.remove(b)
513 explicit.remove(b)
514 if not ancestors or repo[scid].rev() in ancestors:
514 if not ancestors or repo[scid].rev() in ancestors:
515 pushop.outbookmarks.append((b, dcid, scid))
515 pushop.outbookmarks.append((b, dcid, scid))
516 # search added bookmark
516 # search added bookmark
517 for b, scid, dcid in addsrc:
517 for b, scid, dcid in addsrc:
518 if b in explicit:
518 if b in explicit:
519 explicit.remove(b)
519 explicit.remove(b)
520 pushop.outbookmarks.append((b, '', scid))
520 pushop.outbookmarks.append((b, '', scid))
521 # search for overwritten bookmark
521 # search for overwritten bookmark
522 for b, scid, dcid in advdst + diverge + differ:
522 for b, scid, dcid in advdst + diverge + differ:
523 if b in explicit:
523 if b in explicit:
524 explicit.remove(b)
524 explicit.remove(b)
525 pushop.outbookmarks.append((b, dcid, scid))
525 pushop.outbookmarks.append((b, dcid, scid))
526 # search for bookmark to delete
526 # search for bookmark to delete
527 for b, scid, dcid in adddst:
527 for b, scid, dcid in adddst:
528 if b in explicit:
528 if b in explicit:
529 explicit.remove(b)
529 explicit.remove(b)
530 # treat as "deleted locally"
530 # treat as "deleted locally"
531 pushop.outbookmarks.append((b, dcid, ''))
531 pushop.outbookmarks.append((b, dcid, ''))
532 # identical bookmarks shouldn't get reported
532 # identical bookmarks shouldn't get reported
533 for b, scid, dcid in same:
533 for b, scid, dcid in same:
534 if b in explicit:
534 if b in explicit:
535 explicit.remove(b)
535 explicit.remove(b)
536
536
537 if explicit:
537 if explicit:
538 explicit = sorted(explicit)
538 explicit = sorted(explicit)
539 # we should probably list all of them
539 # we should probably list all of them
540 ui.warn(_('bookmark %s does not exist on the local '
540 ui.warn(_('bookmark %s does not exist on the local '
541 'or remote repository!\n') % explicit[0])
541 'or remote repository!\n') % explicit[0])
542 pushop.bkresult = 2
542 pushop.bkresult = 2
543
543
544 pushop.outbookmarks.sort()
544 pushop.outbookmarks.sort()
545
545
546 def _pushcheckoutgoing(pushop):
546 def _pushcheckoutgoing(pushop):
547 outgoing = pushop.outgoing
547 outgoing = pushop.outgoing
548 unfi = pushop.repo.unfiltered()
548 unfi = pushop.repo.unfiltered()
549 if not outgoing.missing:
549 if not outgoing.missing:
550 # nothing to push
550 # nothing to push
551 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
551 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
552 return False
552 return False
553 # something to push
553 # something to push
554 if not pushop.force:
554 if not pushop.force:
555 # if repo.obsstore == False --> no obsolete
555 # if repo.obsstore == False --> no obsolete
556 # then, save the iteration
556 # then, save the iteration
557 if unfi.obsstore:
557 if unfi.obsstore:
558 # this message are here for 80 char limit reason
558 # this message are here for 80 char limit reason
559 mso = _("push includes obsolete changeset: %s!")
559 mso = _("push includes obsolete changeset: %s!")
560 mst = {"unstable": _("push includes unstable changeset: %s!"),
560 mst = {"unstable": _("push includes unstable changeset: %s!"),
561 "bumped": _("push includes bumped changeset: %s!"),
561 "bumped": _("push includes bumped changeset: %s!"),
562 "divergent": _("push includes divergent changeset: %s!")}
562 "divergent": _("push includes divergent changeset: %s!")}
563 # If we are to push if there is at least one
563 # If we are to push if there is at least one
564 # obsolete or unstable changeset in missing, at
564 # obsolete or unstable changeset in missing, at
565 # least one of the missinghead will be obsolete or
565 # least one of the missinghead will be obsolete or
566 # unstable. So checking heads only is ok
566 # unstable. So checking heads only is ok
567 for node in outgoing.missingheads:
567 for node in outgoing.missingheads:
568 ctx = unfi[node]
568 ctx = unfi[node]
569 if ctx.obsolete():
569 if ctx.obsolete():
570 raise error.Abort(mso % ctx)
570 raise error.Abort(mso % ctx)
571 elif ctx.troubled():
571 elif ctx.troubled():
572 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
572 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
573
573
574 discovery.checkheads(pushop)
574 discovery.checkheads(pushop)
575 return True
575 return True
576
576
577 # List of names of steps to perform for an outgoing bundle2, order matters.
577 # List of names of steps to perform for an outgoing bundle2, order matters.
578 b2partsgenorder = []
578 b2partsgenorder = []
579
579
580 # Mapping between step name and function
580 # Mapping between step name and function
581 #
581 #
582 # This exists to help extensions wrap steps if necessary
582 # This exists to help extensions wrap steps if necessary
583 b2partsgenmapping = {}
583 b2partsgenmapping = {}
584
584
585 def b2partsgenerator(stepname, idx=None):
585 def b2partsgenerator(stepname, idx=None):
586 """decorator for function generating bundle2 part
586 """decorator for function generating bundle2 part
587
587
588 The function is added to the step -> function mapping and appended to the
588 The function is added to the step -> function mapping and appended to the
589 list of steps. Beware that decorated functions will be added in order
589 list of steps. Beware that decorated functions will be added in order
590 (this may matter).
590 (this may matter).
591
591
592 You can only use this decorator for new steps, if you want to wrap a step
592 You can only use this decorator for new steps, if you want to wrap a step
593 from an extension, attack the b2partsgenmapping dictionary directly."""
593 from an extension, attack the b2partsgenmapping dictionary directly."""
594 def dec(func):
594 def dec(func):
595 assert stepname not in b2partsgenmapping
595 assert stepname not in b2partsgenmapping
596 b2partsgenmapping[stepname] = func
596 b2partsgenmapping[stepname] = func
597 if idx is None:
597 if idx is None:
598 b2partsgenorder.append(stepname)
598 b2partsgenorder.append(stepname)
599 else:
599 else:
600 b2partsgenorder.insert(idx, stepname)
600 b2partsgenorder.insert(idx, stepname)
601 return func
601 return func
602 return dec
602 return dec
603
603
604 def _pushb2ctxcheckheads(pushop, bundler):
604 def _pushb2ctxcheckheads(pushop, bundler):
605 """Generate race condition checking parts
605 """Generate race condition checking parts
606
606
607 Exists as an independent function to aid extensions
607 Exists as an independent function to aid extensions
608 """
608 """
609 if not pushop.force:
609 if not pushop.force:
610 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
610 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
611
611
612 @b2partsgenerator('changeset')
612 @b2partsgenerator('changeset')
613 def _pushb2ctx(pushop, bundler):
613 def _pushb2ctx(pushop, bundler):
614 """handle changegroup push through bundle2
614 """handle changegroup push through bundle2
615
615
616 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
616 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
617 """
617 """
618 if 'changesets' in pushop.stepsdone:
618 if 'changesets' in pushop.stepsdone:
619 return
619 return
620 pushop.stepsdone.add('changesets')
620 pushop.stepsdone.add('changesets')
621 # Send known heads to the server for race detection.
621 # Send known heads to the server for race detection.
622 if not _pushcheckoutgoing(pushop):
622 if not _pushcheckoutgoing(pushop):
623 return
623 return
624 pushop.repo.prepushoutgoinghooks(pushop.repo,
624 pushop.repo.prepushoutgoinghooks(pushop.repo,
625 pushop.remote,
625 pushop.remote,
626 pushop.outgoing)
626 pushop.outgoing)
627
627
628 _pushb2ctxcheckheads(pushop, bundler)
628 _pushb2ctxcheckheads(pushop, bundler)
629
629
630 b2caps = bundle2.bundle2caps(pushop.remote)
630 b2caps = bundle2.bundle2caps(pushop.remote)
631 version = None
631 version = None
632 cgversions = b2caps.get('changegroup')
632 cgversions = b2caps.get('changegroup')
633 if not cgversions: # 3.1 and 3.2 ship with an empty value
633 if not cgversions: # 3.1 and 3.2 ship with an empty value
634 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
634 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
635 pushop.outgoing)
635 pushop.outgoing)
636 else:
636 else:
637 cgversions = [v for v in cgversions if v in changegroup.packermap]
637 cgversions = [v for v in cgversions if v in changegroup.packermap]
638 if not cgversions:
638 if not cgversions:
639 raise ValueError(_('no common changegroup version'))
639 raise ValueError(_('no common changegroup version'))
640 version = max(cgversions)
640 version = max(cgversions)
641 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
641 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
642 pushop.outgoing,
642 pushop.outgoing,
643 version=version)
643 version=version)
644 cgpart = bundler.newpart('changegroup', data=cg)
644 cgpart = bundler.newpart('changegroup', data=cg)
645 if version is not None:
645 if version is not None:
646 cgpart.addparam('version', version)
646 cgpart.addparam('version', version)
647 def handlereply(op):
647 def handlereply(op):
648 """extract addchangegroup returns from server reply"""
648 """extract addchangegroup returns from server reply"""
649 cgreplies = op.records.getreplies(cgpart.id)
649 cgreplies = op.records.getreplies(cgpart.id)
650 assert len(cgreplies['changegroup']) == 1
650 assert len(cgreplies['changegroup']) == 1
651 pushop.cgresult = cgreplies['changegroup'][0]['return']
651 pushop.cgresult = cgreplies['changegroup'][0]['return']
652 return handlereply
652 return handlereply
653
653
654 @b2partsgenerator('phase')
654 @b2partsgenerator('phase')
655 def _pushb2phases(pushop, bundler):
655 def _pushb2phases(pushop, bundler):
656 """handle phase push through bundle2"""
656 """handle phase push through bundle2"""
657 if 'phases' in pushop.stepsdone:
657 if 'phases' in pushop.stepsdone:
658 return
658 return
659 b2caps = bundle2.bundle2caps(pushop.remote)
659 b2caps = bundle2.bundle2caps(pushop.remote)
660 if not 'pushkey' in b2caps:
660 if not 'pushkey' in b2caps:
661 return
661 return
662 pushop.stepsdone.add('phases')
662 pushop.stepsdone.add('phases')
663 part2node = []
663 part2node = []
664
664
665 def handlefailure(pushop, exc):
665 def handlefailure(pushop, exc):
666 targetid = int(exc.partid)
666 targetid = int(exc.partid)
667 for partid, node in part2node:
667 for partid, node in part2node:
668 if partid == targetid:
668 if partid == targetid:
669 raise error.Abort(_('updating %s to public failed') % node)
669 raise error.Abort(_('updating %s to public failed') % node)
670
670
671 enc = pushkey.encode
671 enc = pushkey.encode
672 for newremotehead in pushop.outdatedphases:
672 for newremotehead in pushop.outdatedphases:
673 part = bundler.newpart('pushkey')
673 part = bundler.newpart('pushkey')
674 part.addparam('namespace', enc('phases'))
674 part.addparam('namespace', enc('phases'))
675 part.addparam('key', enc(newremotehead.hex()))
675 part.addparam('key', enc(newremotehead.hex()))
676 part.addparam('old', enc(str(phases.draft)))
676 part.addparam('old', enc(str(phases.draft)))
677 part.addparam('new', enc(str(phases.public)))
677 part.addparam('new', enc(str(phases.public)))
678 part2node.append((part.id, newremotehead))
678 part2node.append((part.id, newremotehead))
679 pushop.pkfailcb[part.id] = handlefailure
679 pushop.pkfailcb[part.id] = handlefailure
680
680
681 def handlereply(op):
681 def handlereply(op):
682 for partid, node in part2node:
682 for partid, node in part2node:
683 partrep = op.records.getreplies(partid)
683 partrep = op.records.getreplies(partid)
684 results = partrep['pushkey']
684 results = partrep['pushkey']
685 assert len(results) <= 1
685 assert len(results) <= 1
686 msg = None
686 msg = None
687 if not results:
687 if not results:
688 msg = _('server ignored update of %s to public!\n') % node
688 msg = _('server ignored update of %s to public!\n') % node
689 elif not int(results[0]['return']):
689 elif not int(results[0]['return']):
690 msg = _('updating %s to public failed!\n') % node
690 msg = _('updating %s to public failed!\n') % node
691 if msg is not None:
691 if msg is not None:
692 pushop.ui.warn(msg)
692 pushop.ui.warn(msg)
693 return handlereply
693 return handlereply
694
694
695 @b2partsgenerator('obsmarkers')
695 @b2partsgenerator('obsmarkers')
696 def _pushb2obsmarkers(pushop, bundler):
696 def _pushb2obsmarkers(pushop, bundler):
697 if 'obsmarkers' in pushop.stepsdone:
697 if 'obsmarkers' in pushop.stepsdone:
698 return
698 return
699 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
699 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
700 if obsolete.commonversion(remoteversions) is None:
700 if obsolete.commonversion(remoteversions) is None:
701 return
701 return
702 pushop.stepsdone.add('obsmarkers')
702 pushop.stepsdone.add('obsmarkers')
703 if pushop.outobsmarkers:
703 if pushop.outobsmarkers:
704 markers = sorted(pushop.outobsmarkers)
704 markers = sorted(pushop.outobsmarkers)
705 buildobsmarkerspart(bundler, markers)
705 buildobsmarkerspart(bundler, markers)
706
706
707 @b2partsgenerator('bookmarks')
707 @b2partsgenerator('bookmarks')
708 def _pushb2bookmarks(pushop, bundler):
708 def _pushb2bookmarks(pushop, bundler):
709 """handle bookmark push through bundle2"""
709 """handle bookmark push through bundle2"""
710 if 'bookmarks' in pushop.stepsdone:
710 if 'bookmarks' in pushop.stepsdone:
711 return
711 return
712 b2caps = bundle2.bundle2caps(pushop.remote)
712 b2caps = bundle2.bundle2caps(pushop.remote)
713 if 'pushkey' not in b2caps:
713 if 'pushkey' not in b2caps:
714 return
714 return
715 pushop.stepsdone.add('bookmarks')
715 pushop.stepsdone.add('bookmarks')
716 part2book = []
716 part2book = []
717 enc = pushkey.encode
717 enc = pushkey.encode
718
718
719 def handlefailure(pushop, exc):
719 def handlefailure(pushop, exc):
720 targetid = int(exc.partid)
720 targetid = int(exc.partid)
721 for partid, book, action in part2book:
721 for partid, book, action in part2book:
722 if partid == targetid:
722 if partid == targetid:
723 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
723 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
724 # we should not be called for part we did not generated
724 # we should not be called for part we did not generated
725 assert False
725 assert False
726
726
727 for book, old, new in pushop.outbookmarks:
727 for book, old, new in pushop.outbookmarks:
728 part = bundler.newpart('pushkey')
728 part = bundler.newpart('pushkey')
729 part.addparam('namespace', enc('bookmarks'))
729 part.addparam('namespace', enc('bookmarks'))
730 part.addparam('key', enc(book))
730 part.addparam('key', enc(book))
731 part.addparam('old', enc(old))
731 part.addparam('old', enc(old))
732 part.addparam('new', enc(new))
732 part.addparam('new', enc(new))
733 action = 'update'
733 action = 'update'
734 if not old:
734 if not old:
735 action = 'export'
735 action = 'export'
736 elif not new:
736 elif not new:
737 action = 'delete'
737 action = 'delete'
738 part2book.append((part.id, book, action))
738 part2book.append((part.id, book, action))
739 pushop.pkfailcb[part.id] = handlefailure
739 pushop.pkfailcb[part.id] = handlefailure
740
740
741 def handlereply(op):
741 def handlereply(op):
742 ui = pushop.ui
742 ui = pushop.ui
743 for partid, book, action in part2book:
743 for partid, book, action in part2book:
744 partrep = op.records.getreplies(partid)
744 partrep = op.records.getreplies(partid)
745 results = partrep['pushkey']
745 results = partrep['pushkey']
746 assert len(results) <= 1
746 assert len(results) <= 1
747 if not results:
747 if not results:
748 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
748 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
749 else:
749 else:
750 ret = int(results[0]['return'])
750 ret = int(results[0]['return'])
751 if ret:
751 if ret:
752 ui.status(bookmsgmap[action][0] % book)
752 ui.status(bookmsgmap[action][0] % book)
753 else:
753 else:
754 ui.warn(bookmsgmap[action][1] % book)
754 ui.warn(bookmsgmap[action][1] % book)
755 if pushop.bkresult is not None:
755 if pushop.bkresult is not None:
756 pushop.bkresult = 1
756 pushop.bkresult = 1
757 return handlereply
757 return handlereply
758
758
759
759
760 def _pushbundle2(pushop):
760 def _pushbundle2(pushop):
761 """push data to the remote using bundle2
761 """push data to the remote using bundle2
762
762
763 The only currently supported type of data is changegroup but this will
763 The only currently supported type of data is changegroup but this will
764 evolve in the future."""
764 evolve in the future."""
765 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
765 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
766 pushback = (pushop.trmanager
766 pushback = (pushop.trmanager
767 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
767 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
768
768
769 # create reply capability
769 # create reply capability
770 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
770 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
771 allowpushback=pushback))
771 allowpushback=pushback))
772 bundler.newpart('replycaps', data=capsblob)
772 bundler.newpart('replycaps', data=capsblob)
773 replyhandlers = []
773 replyhandlers = []
774 for partgenname in b2partsgenorder:
774 for partgenname in b2partsgenorder:
775 partgen = b2partsgenmapping[partgenname]
775 partgen = b2partsgenmapping[partgenname]
776 ret = partgen(pushop, bundler)
776 ret = partgen(pushop, bundler)
777 if callable(ret):
777 if callable(ret):
778 replyhandlers.append(ret)
778 replyhandlers.append(ret)
779 # do not push if nothing to push
779 # do not push if nothing to push
780 if bundler.nbparts <= 1:
780 if bundler.nbparts <= 1:
781 return
781 return
782 stream = util.chunkbuffer(bundler.getchunks())
782 stream = util.chunkbuffer(bundler.getchunks())
783 try:
783 try:
784 try:
784 try:
785 reply = pushop.remote.unbundle(stream, ['force'], 'push')
785 reply = pushop.remote.unbundle(stream, ['force'], 'push')
786 except error.BundleValueError as exc:
786 except error.BundleValueError as exc:
787 raise error.Abort('missing support for %s' % exc)
787 raise error.Abort('missing support for %s' % exc)
788 try:
788 try:
789 trgetter = None
789 trgetter = None
790 if pushback:
790 if pushback:
791 trgetter = pushop.trmanager.transaction
791 trgetter = pushop.trmanager.transaction
792 op = bundle2.processbundle(pushop.repo, reply, trgetter)
792 op = bundle2.processbundle(pushop.repo, reply, trgetter)
793 except error.BundleValueError as exc:
793 except error.BundleValueError as exc:
794 raise error.Abort('missing support for %s' % exc)
794 raise error.Abort('missing support for %s' % exc)
795 except bundle2.AbortFromPart as exc:
795 except bundle2.AbortFromPart as exc:
796 pushop.ui.status(_('remote: %s\n') % exc)
796 pushop.ui.status(_('remote: %s\n') % exc)
797 raise error.Abort(_('push failed on remote'), hint=exc.hint)
797 raise error.Abort(_('push failed on remote'), hint=exc.hint)
798 except error.PushkeyFailed as exc:
798 except error.PushkeyFailed as exc:
799 partid = int(exc.partid)
799 partid = int(exc.partid)
800 if partid not in pushop.pkfailcb:
800 if partid not in pushop.pkfailcb:
801 raise
801 raise
802 pushop.pkfailcb[partid](pushop, exc)
802 pushop.pkfailcb[partid](pushop, exc)
803 for rephand in replyhandlers:
803 for rephand in replyhandlers:
804 rephand(op)
804 rephand(op)
805
805
806 def _pushchangeset(pushop):
806 def _pushchangeset(pushop):
807 """Make the actual push of changeset bundle to remote repo"""
807 """Make the actual push of changeset bundle to remote repo"""
808 if 'changesets' in pushop.stepsdone:
808 if 'changesets' in pushop.stepsdone:
809 return
809 return
810 pushop.stepsdone.add('changesets')
810 pushop.stepsdone.add('changesets')
811 if not _pushcheckoutgoing(pushop):
811 if not _pushcheckoutgoing(pushop):
812 return
812 return
813 pushop.repo.prepushoutgoinghooks(pushop.repo,
813 pushop.repo.prepushoutgoinghooks(pushop.repo,
814 pushop.remote,
814 pushop.remote,
815 pushop.outgoing)
815 pushop.outgoing)
816 outgoing = pushop.outgoing
816 outgoing = pushop.outgoing
817 unbundle = pushop.remote.capable('unbundle')
817 unbundle = pushop.remote.capable('unbundle')
818 # TODO: get bundlecaps from remote
818 # TODO: get bundlecaps from remote
819 bundlecaps = None
819 bundlecaps = None
820 # create a changegroup from local
820 # create a changegroup from local
821 if pushop.revs is None and not (outgoing.excluded
821 if pushop.revs is None and not (outgoing.excluded
822 or pushop.repo.changelog.filteredrevs):
822 or pushop.repo.changelog.filteredrevs):
823 # push everything,
823 # push everything,
824 # use the fast path, no race possible on push
824 # use the fast path, no race possible on push
825 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
825 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
826 cg = changegroup.getsubset(pushop.repo,
826 cg = changegroup.getsubset(pushop.repo,
827 outgoing,
827 outgoing,
828 bundler,
828 bundler,
829 'push',
829 'push',
830 fastpath=True)
830 fastpath=True)
831 else:
831 else:
832 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
832 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
833 bundlecaps)
833 bundlecaps)
834
834
835 # apply changegroup to remote
835 # apply changegroup to remote
836 if unbundle:
836 if unbundle:
837 # local repo finds heads on server, finds out what
837 # local repo finds heads on server, finds out what
838 # revs it must push. once revs transferred, if server
838 # revs it must push. once revs transferred, if server
839 # finds it has different heads (someone else won
839 # finds it has different heads (someone else won
840 # commit/push race), server aborts.
840 # commit/push race), server aborts.
841 if pushop.force:
841 if pushop.force:
842 remoteheads = ['force']
842 remoteheads = ['force']
843 else:
843 else:
844 remoteheads = pushop.remoteheads
844 remoteheads = pushop.remoteheads
845 # ssh: return remote's addchangegroup()
845 # ssh: return remote's addchangegroup()
846 # http: return remote's addchangegroup() or 0 for error
846 # http: return remote's addchangegroup() or 0 for error
847 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
847 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
848 pushop.repo.url())
848 pushop.repo.url())
849 else:
849 else:
850 # we return an integer indicating remote head count
850 # we return an integer indicating remote head count
851 # change
851 # change
852 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
852 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
853 pushop.repo.url())
853 pushop.repo.url())
854
854
855 def _pushsyncphase(pushop):
855 def _pushsyncphase(pushop):
856 """synchronise phase information locally and remotely"""
856 """synchronise phase information locally and remotely"""
857 cheads = pushop.commonheads
857 cheads = pushop.commonheads
858 # even when we don't push, exchanging phase data is useful
858 # even when we don't push, exchanging phase data is useful
859 remotephases = pushop.remote.listkeys('phases')
859 remotephases = pushop.remote.listkeys('phases')
860 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
860 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
861 and remotephases # server supports phases
861 and remotephases # server supports phases
862 and pushop.cgresult is None # nothing was pushed
862 and pushop.cgresult is None # nothing was pushed
863 and remotephases.get('publishing', False)):
863 and remotephases.get('publishing', False)):
864 # When:
864 # When:
865 # - this is a subrepo push
865 # - this is a subrepo push
866 # - and remote support phase
866 # - and remote support phase
867 # - and no changeset was pushed
867 # - and no changeset was pushed
868 # - and remote is publishing
868 # - and remote is publishing
869 # We may be in issue 3871 case!
869 # We may be in issue 3871 case!
870 # We drop the possible phase synchronisation done by
870 # We drop the possible phase synchronisation done by
871 # courtesy to publish changesets possibly locally draft
871 # courtesy to publish changesets possibly locally draft
872 # on the remote.
872 # on the remote.
873 remotephases = {'publishing': 'True'}
873 remotephases = {'publishing': 'True'}
874 if not remotephases: # old server or public only reply from non-publishing
874 if not remotephases: # old server or public only reply from non-publishing
875 _localphasemove(pushop, cheads)
875 _localphasemove(pushop, cheads)
876 # don't push any phase data as there is nothing to push
876 # don't push any phase data as there is nothing to push
877 else:
877 else:
878 ana = phases.analyzeremotephases(pushop.repo, cheads,
878 ana = phases.analyzeremotephases(pushop.repo, cheads,
879 remotephases)
879 remotephases)
880 pheads, droots = ana
880 pheads, droots = ana
881 ### Apply remote phase on local
881 ### Apply remote phase on local
882 if remotephases.get('publishing', False):
882 if remotephases.get('publishing', False):
883 _localphasemove(pushop, cheads)
883 _localphasemove(pushop, cheads)
884 else: # publish = False
884 else: # publish = False
885 _localphasemove(pushop, pheads)
885 _localphasemove(pushop, pheads)
886 _localphasemove(pushop, cheads, phases.draft)
886 _localphasemove(pushop, cheads, phases.draft)
887 ### Apply local phase on remote
887 ### Apply local phase on remote
888
888
889 if pushop.cgresult:
889 if pushop.cgresult:
890 if 'phases' in pushop.stepsdone:
890 if 'phases' in pushop.stepsdone:
891 # phases already pushed though bundle2
891 # phases already pushed though bundle2
892 return
892 return
893 outdated = pushop.outdatedphases
893 outdated = pushop.outdatedphases
894 else:
894 else:
895 outdated = pushop.fallbackoutdatedphases
895 outdated = pushop.fallbackoutdatedphases
896
896
897 pushop.stepsdone.add('phases')
897 pushop.stepsdone.add('phases')
898
898
899 # filter heads already turned public by the push
899 # filter heads already turned public by the push
900 outdated = [c for c in outdated if c.node() not in pheads]
900 outdated = [c for c in outdated if c.node() not in pheads]
901 # fallback to independent pushkey command
901 # fallback to independent pushkey command
902 for newremotehead in outdated:
902 for newremotehead in outdated:
903 r = pushop.remote.pushkey('phases',
903 r = pushop.remote.pushkey('phases',
904 newremotehead.hex(),
904 newremotehead.hex(),
905 str(phases.draft),
905 str(phases.draft),
906 str(phases.public))
906 str(phases.public))
907 if not r:
907 if not r:
908 pushop.ui.warn(_('updating %s to public failed!\n')
908 pushop.ui.warn(_('updating %s to public failed!\n')
909 % newremotehead)
909 % newremotehead)
910
910
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    Only performs the move when a transaction manager is attached to the
    push operation (i.e. the local repo is locked); otherwise it merely
    warns that the phase update was skipped.
    """
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
927
927
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
946
946
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for name, oldnode, newnode in pushop.outbookmarks:
        # classify the change for user-facing messages
        if not oldnode:
            action = 'export'
        elif not newnode:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', name, oldnode, newnode):
            ui.status(bookmsgmap[action][0] % name)
        else:
            ui.warn(bookmsgmap[action][1] % name)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
968
968
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # cached on first access: whether bundle2 may be used for this pull
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        # cached bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1038
1038
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        # repo the transaction will be opened on
        self.repo = repo
        # operation name (e.g. 'pull') used in the transaction/hook names
        self.source = source
        # remote url, recorded in hook arguments (password stripped for
        # display via util.hidepassword)
        self.url = url
        # lazily created transaction object
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1068
1068
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # local peer: abort early if the destination lacks required features
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each _pull* step checks pullop.stepsdone, so steps already handled
        # by the bundle2 exchange above become no-ops here
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release the transaction (rollback if not closed), then the lock
        pullop.trmanager.release()
        lock.release()

    return pullop
1123
1123
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1131
1131
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # a step name may only be registered once through the decorator
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1147
1147
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # execute registered steps in registration order
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1153
1153
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmarks already provided by the caller, nothing to fetch
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1167
1167
1168
1168
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # node already known locally (possibly hidden): treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head is already known locally: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1206
1206
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # request obsmarkers only when a marker version is shared
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1274
1274
1275 def _pullbundle2extraprepare(pullop, kwargs):
1275 def _pullbundle2extraprepare(pullop, kwargs):
1276 """hook function so that extensions can extend the getbundle call"""
1276 """hook function so that extensions can extend the getbundle call"""
1277 pass
1277 pass
1278
1278
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the best protocol the remote supports, newest first
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1311
1311
def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1318
1318
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1353
1353
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1365
1365
1366 def _pullobsolete(pullop):
1366 def _pullobsolete(pullop):
1367 """utility function to pull obsolete markers from a remote
1367 """utility function to pull obsolete markers from a remote
1368
1368
1369 The `gettransaction` is function that return the pull transaction, creating
1369 The `gettransaction` is function that return the pull transaction, creating
1370 one if necessary. We return the transaction to inform the calling code that
1370 one if necessary. We return the transaction to inform the calling code that
1371 a new transaction have been created (when applicable).
1371 a new transaction have been created (when applicable).
1372
1372
1373 Exists mostly to allow overriding for experimentation purpose"""
1373 Exists mostly to allow overriding for experimentation purpose"""
1374 if 'obsmarkers' in pullop.stepsdone:
1374 if 'obsmarkers' in pullop.stepsdone:
1375 return
1375 return
1376 pullop.stepsdone.add('obsmarkers')
1376 pullop.stepsdone.add('obsmarkers')
1377 tr = None
1377 tr = None
1378 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1378 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1379 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1379 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1380 remoteobs = pullop.remote.listkeys('obsolete')
1380 remoteobs = pullop.remote.listkeys('obsolete')
1381 if 'dump0' in remoteobs:
1381 if 'dump0' in remoteobs:
1382 tr = pullop.gettransaction()
1382 tr = pullop.gettransaction()
1383 for key in sorted(remoteobs, reverse=True):
1383 for key in sorted(remoteobs, reverse=True):
1384 if key.startswith('dump'):
1384 if key.startswith('dump'):
1385 data = base85.b85decode(remoteobs[key])
1385 data = base85.b85decode(remoteobs[key])
1386 pullop.repo.obsstore.mergemarkers(tr, data)
1386 pullop.repo.obsstore.mergemarkers(tr, data)
1387 pullop.repo.invalidatevolatilesets()
1387 pullop.repo.invalidatevolatilesets()
1388 return tr
1388 return tr
1389
1389
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle

    The bundle2 capabilities of the local repo are serialized and URL-quoted
    into a single 'bundle2=' capability string alongside the 'HG20' marker.
    """
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps
1396
1396
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1404
1404
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attach the getbundle2partsmapping dictionary directly."""
    def dec(func):
        # a step name may only be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            # callers may force a specific position in the step ordering
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
1423
1423
def bundle2requested(bundlecaps):
    """Tell whether the client capabilities ask for a bundle2 stream.

    A bundle2 request is signalled by any capability starting with 'HG2'
    (e.g. 'HG20'). A missing capability set means a plain bundle10 client.
    """
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1428
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 only knows changegroups; any other argument is an error
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1470
1473
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions: # 3.1 and 3.2 ship with an empty value
            # negotiate the best changegroup version both sides understand
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        # advisory hint so clients can display progress
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1496
1499
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    # one 'listkeys' part per requested pushkey namespace
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1507
1510
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # only ship markers relevant to the ancestors of the requested heads
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)
1519
1522
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1562
1565
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.

    `their_heads` may be the literal ['force'] (skip the check), the exact
    list of head nodes, or ['hashed', <sha1 of sorted heads>].
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1576
1579
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream (a bundle10 object has no 'params')
            r = None
            try:
                def gettransaction():
                    # lazily take locks and open the transaction on first use
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # mark the error so upper layers know it happened mid-bundle2
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # preserve any server output produced before the failure
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # legacy bundle10 path: plain lock and direct application
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1645
1648
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('experimental', 'clonebundles', False):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # a partial clone (explicit heads) can't be served from a pre-built bundle
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config experimental.clonebundles=false"'))
1709
1712
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.

    Each non-empty manifest line is ``<URL> [key=value ...]`` with keys and
    values URL-quoted.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    # malformed specs are kept verbatim; filtering happens later
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
1745
1748
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                # strict=True rejects anything this client can't consume
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries
1778
1781
def sortclonebundleentries(ui, entries):
    """Sort clone bundle entries by the user's declared preferences.

    Preferences come from ``experimental.clonebundleprefers`` as a list of
    ``key=value`` strings; earlier preferences take priority. Without any
    preferences the original manifest order is preserved.
    """
    # experimental config: experimental.clonebundleprefers
    prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    # NOTE: sorted(cmp=...) is Python 2 only; on Python 3 this would need
    # functools.cmp_to_key(compareentry).
    return sorted(entries, cmp=compareentry)
1823
1826
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url``, recognizes the payload type (bundle2, stream clone,
    or legacy changegroup) and applies it to ``repo`` inside a single
    transaction held under the repo lock.

    Returns True when the bundle was fetched and applied successfully,
    False on HTTP/URL fetch errors (after emitting a warning). Other
    exceptions propagate so the caller can decide how to recover.
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                # e.reason may be a message string or another exception
                # instance, not necessarily an indexable (errno, message)
                # pair; blindly using e.reason[1] can raise or print a
                # single character instead of the error.
                ui.warn(_('error fetching bundle: %s\n') % str(e.reason))

            return False
        finally:
            # Release (and roll back if not closed) the transaction before
            # dropping the lock.
            tr.release()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now