##// END OF EJS Templates
exchange: use pushop.repo instead of repo
Sean Farley -
r26672:90df14eb default
parent child Browse files
Show More
@@ -1,1773 +1,1773 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib, urllib2
10 import errno, urllib, urllib2
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14 import streamclone
14 import streamclone
15 import sslutil
15 import sslutil
16 import tags
16 import tags
17 import url as urlmod
17 import url as urlmod
18
18
# Maps human-readable bundle compression names to the internal
# two-letter identifiers used in bundle headers.
_bundlespeccompressions = {'none': None,
                           'bzip2': 'BZ',
                           'gzip': 'GZ',
                          }

# Maps human-readable bundle version names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'bundle2': '02', #legacy
                        }

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial
    remain readable by an older version.

    The string currently has the form:

       <compression>-<type>

    where <compression> is one of the supported compression formats
    and <type> is (currently) a version string.

    If ``strict`` is True (the default) <compression> is required;
    otherwise it is optional.

    If ``externalnames`` is False (the default), the human-centric names
    are converted to their internal representation.

    Returns a 2-tuple of (compression, version). Compression will be
    ``None`` if not in strict mode and a compression isn't defined.

    Raises ``InvalidBundleSpecification`` when the specification is not
    syntactically well formed, and ``UnsupportedBundleSpecification``
    when the compression or bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)
        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)
        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # The value could be just the compression or just the version; in
        # that case some defaults are assumed (but only when not strict).
        assert not strict

        if spec in _bundlespeccompressions:
            compression = spec
            # generaldelta repos default to the v2 changegroup format
            version = 'v2' if 'generaldelta' in repo.requirements else 'v1'
        elif spec in _bundlespeccgversions:
            compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    if not externalnames:
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version
100
100
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the header of a bundle stream and return a matching unbundler.

    ``fh`` is the stream to read, ``fname`` a name used in error messages
    (``"stream"`` when empty), and ``vfs`` an optional filesystem used to
    resolve ``fname`` to a full path.

    Raises ``error.Abort`` when the stream is not a Mercurial bundle or
    uses an unknown bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A headerless stream beginning with NUL is treated as an
        # uncompressed cg1 bundle; push the consumed bytes back first.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # cg1 carries its compression algorithm right after the magic
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
126
126
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
141
141
142 def _canusebundle2(op):
142 def _canusebundle2(op):
143 """return true if a pull/push can use bundle2
143 """return true if a pull/push can use bundle2
144
144
145 Feel free to nuke this function when we drop the experimental option"""
145 Feel free to nuke this function when we drop the experimental option"""
146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
147 and op.remote.capable('bundle2'))
147 and op.remote.capable('bundle2'))
148
148
149
149
class pushoperation(object):
    """State holder for a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been done through bundle2 already)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push: all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # While trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads

    # mapping of messages used when pushing bookmarks
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
261
261
262
262
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # There are two ways to push to a remote repo:
    #
    # - addchangegroup assumes the local user can lock the remote
    #   repo (local filesystem, old ssh servers).
    #
    # - unbundle assumes the local user cannot lock the remote repo
    #   (new ssh servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get the local lock as we might write phase data
    localwlock = locallock = None
    try:
        # A bundle2 push may receive a reply bundle touching bookmarks or
        # other things requiring the wlock. Take it now to ensure proper
        # lock ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # The source repo cannot be locked. We do not abort the push,
        # but simply disable the local phase synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
343
343
344 # list of steps to perform discovery before push
344 # list of steps to perform discovery before push
345 pushdiscoveryorder = []
345 pushdiscoveryorder = []
346
346
347 # Mapping between step name and function
347 # Mapping between step name and function
348 #
348 #
349 # This exists to help extensions wrap steps if necessary
349 # This exists to help extensions wrap steps if necessary
350 pushdiscoverymapping = {}
350 pushdiscoverymapping = {}
351
351
352 def pushdiscovery(stepname):
352 def pushdiscovery(stepname):
353 """decorator for function performing discovery before push
353 """decorator for function performing discovery before push
354
354
355 The function is added to the step -> function mapping and appended to the
355 The function is added to the step -> function mapping and appended to the
356 list of steps. Beware that decorated function will be added in order (this
356 list of steps. Beware that decorated function will be added in order (this
357 may matter).
357 may matter).
358
358
359 You can only use this decorator for a new step, if you want to wrap a step
359 You can only use this decorator for a new step, if you want to wrap a step
360 from an extension, change the pushdiscovery dictionary directly."""
360 from an extension, change the pushdiscovery dictionary directly."""
361 def dec(func):
361 def dec(func):
362 assert stepname not in pushdiscoverymapping
362 assert stepname not in pushdiscoverymapping
363 pushdiscoverymapping[stepname] = func
363 pushdiscoverymapping[stepname] = func
364 pushdiscoveryorder.append(stepname)
364 pushdiscoveryorder.append(stepname)
365 return func
365 return func
366 return dec
366 return dec
367
367
368 def _pushdiscovery(pushop):
368 def _pushdiscovery(pushop):
369 """Run all discovery steps"""
369 """Run all discovery steps"""
370 for stepname in pushdiscoveryorder:
370 for stepname in pushdiscoveryorder:
371 step = pushdiscoverymapping[stepname]
371 step = pushdiscoverymapping[stepname]
372 step(pushop)
372 step(pushop)
373
373
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
386
386
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and the failure case of the changeset
    push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # This is a subrepo push to a publishing server that supports
        # phases and has nothing to receive: we may be in issue 3871 case!
        # Drop the courtesy phase synchronisation that would otherwise
        # publish changesets that are possibly still draft locally.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
435
435
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    # Only compute outgoing obsmarkers when marker exchange is enabled,
    # the local store has markers, and the remote advertises the
    # 'obsolete' pushkey namespace.
    if not (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
            and pushop.repo.obsstore
            and 'obsolete' in pushop.remote.listkeys('namespaces')):
        return
    repo = pushop.repo
    # Very naive computation; can be quite expensive on a big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
446
446
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    # Compare local and remote bookmarks to decide which ones to push.
    #
    # Fills ``pushop.outbookmarks`` with ``(name, old-remote-id,
    # new-remote-id)`` tuples; an empty old id means "create on remote",
    # an empty new id means "delete on remote".  Sets ``pushop.bkresult``
    # to 2 when an explicitly requested bookmark is unknown on both sides.
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark updates to ancestors of the pushed revisions
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user named explicitly on the command line
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # local bookmark is a fast-forward of the remote one
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    # (only overwritten when explicitly requested by name)
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    # (only deleted when explicitly requested by name)
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
497
497
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push.  Unless the push is
    forced, aborts when an outgoing head is obsolete or troubled, and
    delegates new-head/new-branch detection to ``discovery.checkheads``.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

        # internal config: bookmarks.pushing
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
534
534
# List of names of steps to perform for an outgoing bundle2, order matters.
# Populated by the b2partsgenerator decorator below.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
542
542
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attach to the b2partsgenmapping dictionary directly."""
    def dec(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            # explicit position in the step sequence was requested
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
561
561
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if not pushop.force:
        # ask the server to fail if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
569
569
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version supported on both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
611
611
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one 'pushkey' part per remote head that must be turned public
    and registers reply/failure handlers for them.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # match the membership style used by _pushb2bookmarks
    if 'pushkey' not in b2caps:
        # remote cannot handle pushkey parts inside bundle2
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used to map a part failure back to its node
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # inspect the server reply for every pushkey part we emitted
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
652
652
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the outgoing bundle2

    Skipped when the step already ran, when no marker version is common
    to both sides, or when there is nothing to send.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
664
664
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one 'pushkey' part per entry in ``pushop.outbookmarks`` and
    registers reply/failure handlers translating server answers into
    user-facing messages and ``pushop.bkresult``.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples used by the handlers below
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty 'old' means creation, empty 'new' means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
716
716
717
717
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator in order, collecting the
    # reply handlers they return
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # dispatch the failure to the callback registered by the
        # generator of the failing part
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
760
760
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Skipped when the 'changesets' step was already handled (e.g. by the
    bundle2 code path).  Stores the remote's result in
    ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
809
809
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the phases advertised by the remote to the local repo
    (a publishing remote turns the common heads public locally), then,
    unless bundle2 already handled it, pushes local phase changes via
    individual pushkey calls.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
865
865
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # a transaction is held: the boundary can actually be advanced
        phases.advanceboundary(pushop.repo, pushop.trmanager.transaction(),
                               phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
882
882
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Skipped when bundle2 already handled the 'obsmarkers' step.  Markers
    are sent through the 'obsolete' pushkey namespace.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        # encode the markers into one or more pushkey 'dump' entries
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
901
901
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    # skip when the changeset push reported an error (cgresult == 0) or
    # when bundle2 already handled the 'bookmarks' step
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty 'old' means creation, empty 'new' means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
923
923
class pulloperation(object):
    """State holder for a single pull operation.

    Its purpose is to carry pull-related state and very common
    operations. A new instance should be created at the beginning of
    each pull and discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # local repository we pull into
        self.repo = repo
        # remote peer we pull from
        self.remote = remote
        # revisions requested for the pull (None means "everything")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # is this a forced pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (installed later by pull())
        self.trmanager = None
        # changesets common to local and remote before the pull
        self.common = None
        # set of pulled remote heads
        self.rheads = None
        # changesets missing locally, to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # names of the steps already performed
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is None:
            # We pulled everything possible: sync on everything common,
            # plus any remote heads not already in that set.
            known = set(self.common)
            subset = list(self.common)
            for node in self.rheads:
                if node not in known:
                    subset.append(node)
            return subset
        # We pulled a specific subset: sync on this subset only.
        return self.heads

    @util.propertycache
    def canusebundle2(self):
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
991
991
class transactionmanager(object):
    """Manage the life cycle of a transaction.

    The transaction is created lazily, on demand, and the appropriate
    hooks fire when it is closed."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close the transaction if one was created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release the transaction if one was created"""
        if self._tr is not None:
            self._tr.release()
1021
1021
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **opargs)
    if pullop.remote.local():
        # refuse to pull from a local repo requiring unsupported features
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
1076
1076
# Ordered list of discovery step names to run before a pull.
pulldiscoveryorder = []

# Mapping between step name and function.
#
# This exists so extensions can wrap individual steps if necessary.
pulldiscoverymapping = {}
1084
1084
def pulldiscovery(stepname):
    """decorator registering a function as a pre-pull discovery step

    The decorated function is added to the step -> function mapping and
    its name appended to the list of steps. Beware that decorated
    functions are registered in definition order (this may matter).

    Only use this decorator for new steps; to wrap a step from an
    extension, change the pulldiscoverymapping dictionary directly."""
    def register(func):
        # a step name must be registered at most once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1100
1100
def _pulldiscovery(pullop):
    """Run all registered discovery steps, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1106
1106
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    Without bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data already available, nothing to do
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1120
1120
1121
1121
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle
    all discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(pullop.repo,
                                                         pullop.remote,
                                                         heads=pullop.heads,
                                                         force=pullop.force)
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because that ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for node in rheads:
            if node in nm:
                # known locally: move from unknown heads to common
                if node not in scommon:
                    common.append(node)
            else:
                filteredrheads.append(node)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1159
1159
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')
    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    elif pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        # only request obsmarkers when we share a marker version
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1217
1217
1218 def _pullbundle2extraprepare(pullop, kwargs):
1218 def _pullbundle2extraprepare(pullop, kwargs):
1219 """hook function so that extensions can extend the getbundle call"""
1219 """hook function so that extensions can extend the getbundle call"""
1220 pass
1220 pass
1221
1221
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as long as possible so we don't
    # open a transaction for nothing or break a future useful rollback
    # call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the richest protocol the remote supports
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads,
                                             'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1255
1255
def _pullphase(pullop):
    """fetch phase data from the remote and apply it locally"""
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)
1262
1262
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing: all common changesets should be
        # seen as public.
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally, then advance the rest
    pheads = [node for node in pheads if phase(unfi, rev(node)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally, then advance the rest
    dheads = [node for node in dheads if phase(unfi, rev(node)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1297
1297
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1309
1309
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    A transaction is created lazily (via pullop.gettransaction) only if
    markers are actually received. We return the transaction to inform
    the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1333
1333
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1340
1340
# Names of the steps generating bundle2 parts for getbundle; order matters.
getbundle2partsorder = []

# Mapping between step name and function.
#
# This exists so extensions can wrap individual steps if necessary.
getbundle2partsmapping = {}
1348
1348
def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is added to the step -> function mapping and
    its name inserted into the list of steps (appended when ``idx`` is
    None, at position ``idx`` otherwise). Beware that decorated
    functions are registered in definition order (this may matter).

    Only use this decorator for new steps; to wrap a step from an
    extension, change the getbundle2partsmapping dictionary directly."""
    def register(func):
        # a step name must be registered at most once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1367
1367
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Produces either a bundle HG10 or a bundle HG20 stream, depending on the
    capabilities advertised in ``bundlecaps``. For now, the bundle can contain
    only changegroup, but this will change when more part types become
    available for bundle2.

    This is different from changegroup.getchangegroup, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # a client asking for any HG2x stream gets the bundle20 path
    wantsbundle2 = (bundlecaps is not None
                    and any(cap.startswith('HG2') for cap in bundlecaps))

    if not wantsbundle2:
        # legacy bundle10 path: only a plain changegroup can be emitted
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 path: decode the bundle2 capabilities the client advertised
    b2caps = {}
    prefix = 'bundle2='
    for cap in bundlecaps:
        if cap.startswith(prefix):
            blob = urllib.unquote(cap[len(prefix):])
            b2caps.update(bundle2.decodecaps(blob))

    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for stepname in getbundle2partsorder:
        partgen = getbundle2partsmapping[stepname]
        partgen(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
                **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1414
1414
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        # caller explicitly opted out of the changegroup
        return

    # negotiate the changegroup version with the client
    version = None
    cgversions = b2caps.get('changegroup')
    getcgkwargs = {}
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = getcgkwargs['version'] = max(cgversions)

    outgoing = changegroup.computeoutgoing(repo, heads, common)
    cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                            bundlecaps=bundlecaps,
                                            **getcgkwargs)
    if not cg:
        # nothing to send
        return

    part = bundler.newpart('changegroup', data=cg)
    if version is not None:
        part.addparam('version', version)
    part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1440
1440
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1451
1451
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # only markers relevant to ancestors of the requested heads are sent
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    buildobsmarkerspart(bundler, markers)
1463
1463
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Only send when changesets are being exchanged AND the client
    # advertises support for the part.
    if not kwargs.get('cg', True) or 'hgtagsfnodes' not in b2caps:
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)
    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    chunks = []
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is None:
            continue
        chunks.append(node)
        chunks.append(fnode)

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1506
1506
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    # accept a forced push, an exact head list match, or a matching digest
    if (their_heads == ['force'] or their_heads == heads
            or their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1520
1520
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised.

    ``cg`` may be either a legacy changegroup or a bundle2 stream (detected
    via its ``params`` attribute); the two cases take different lock and
    transaction paths below.
    """
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream
            r = None
            try:
                def gettransaction():
                    # lazily take wlock, then lock, then open the
                    # transaction, on first use only
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # buffer ui output so it can be forwarded to the
                        # client inside the reply bundle
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # flag the exception so callers know it happened during
                # bundle2 processing, and salvage any captured output
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # legacy changegroup path: plain lock, no wlock/transaction here
            lockandtr[1] = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1589
1589
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""
    repo = pullop.repo
    remote = pullop.remote

    # only active when the experimental feature is enabled, the pull is a
    # full clone (no explicit heads), and the server advertises the
    # capability
    if not repo.ui.configbool('experimental', 'clonebundles', False):
        return
    if pullop.heads:
        return
    if not remote.capable('clonebundles'):
        return

    manifest = remote._call('clonebundles')
    entries = parseclonebundlesmanifest(repo, manifest)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
        return

    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    if repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('consider contacting the server '
                                 'operator if this error persists'))
1642
1642
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Each non-empty line is a URL optionally followed by whitespace-separated
    ``key=value`` attributes (both halves percent-encoded).

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue

        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version = parsebundlespec(repo, value,
                                                    externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        entries.append(attrs)

    return entries
1678
1678
def filterclonebundleentries(repo, entries):
    """Return the subset of clone bundle entries this client can use.

    Entries with an invalid or unsupported BUNDLESPEC, or requiring SNI
    when the local Python lacks it, are dropped (with a debug note).
    """
    usable = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        usable.append(entry)

    return usable
1702
1702
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by the user's stated preferences.

    Preferences come from the experimental.clonebundleprefers config list,
    each item of the form ``ATTR=VALUE``. Entries matching earlier
    preferences sort first; ties fall through to later preferences and
    finally to the entries' original (manifest) order, which the stable
    sort preserves. Returns a new list; ``entries`` is not modified.
    """
    # local import: the module header does not currently import functools
    import functools

    # experimental config: experimental.clonebundleprefers
    prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our comparison function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order (sorted() is stable).
        return 0

    # sorted(..., cmp=...) is Python 2 only; cmp_to_key produces identical
    # ordering and also works on Python 3.
    return sorted(entries, key=functools.cmp_to_key(compareentry))
1747
1747
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url``, reads it as a bundle (bundle2 or legacy changegroup)
    and applies it inside a single transaction. Returns True on success;
    returns False on HTTP/URL fetch errors (after warning the user).
    Other errors propagate and roll back via the transaction release.
    """
    # lock is taken before the transaction and released after it,
    # mirroring the usual lock/transaction nesting
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # bundle2 streams carry their own part processing; legacy
                # streams go through addchangegroup
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                else:
                    changegroup.addchangegroup(repo, cg, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason)

            # fetch failed; tr.release() below rolls back anything applied
            return False
        finally:
            tr.release()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now