##// END OF EJS Templates
exchange: allow fallbackheads to use lazy set behavior...
Durham Goode -
r26184:327d09f0 default
parent child Browse files
Show More
@@ -1,1574 +1,1574 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import time
8 import time
9 from i18n import _
9 from i18n import _
10 from node import hex, nullid
10 from node import hex, nullid
11 import errno, urllib
11 import errno, urllib
12 import util, scmutil, changegroup, base85, error, store
12 import util, scmutil, changegroup, base85, error, store
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
14 import lock as lockmod
14 import lock as lockmod
15 import tags
15 import tags
16
16
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler for the bundle stream ``fh``.

    ``fname`` is only used for error messages (falls back to "stream" when
    empty) and is joined through ``vfs`` when one is provided.  Recognizes
    HG10 changegroup bundles and HG2x bundle2 streams; a headerless stream
    (starting with '\\0') is treated as an uncompressed HG10 bundle.
    Raises util.Abort for non-Mercurial data or unknown versions.
    """
    magicheader = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
    if not magicheader.startswith('HG') and magicheader.startswith('\0'):
        # headerless bundle: push the consumed bytes back and assume an
        # uncompressed HG10 changegroup
        fh = changegroup.headerlessfixup(fh, magicheader)
        magicheader = "HG10"
        compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = magicheader[0:2]
    version = magicheader[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # compression algorithm is stored right after the HG10 header
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
42
42
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    # nothing to encode: do not emit an empty part
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
57
57
58 def _canusebundle2(op):
58 def _canusebundle2(op):
59 """return true if a pull/push can use bundle2
59 """return true if a pull/push can use bundle2
60
60
61 Feel free to nuke this function when we drop the experimental option"""
61 Feel free to nuke this function when we drop the experimental option"""
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
63 and op.remote.capable('bundle2'))
63 and op.remote.capable('bundle2'))
64
64
65
65
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        # NOTE: 'common' may be a lazily-evaluated set; we only need
        # membership tests here, so we deliberately avoid copying it into a
        # plain set (that copy would force full evaluation).
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # (success template, failure template) keyed by action name
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
177
177
178
178
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing over the filesystem: the destination must understand every
        # requirement of the source repo
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # transaction used to apply any bundle2 reply the server sends
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must hold the remote lock ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        # commit the push-response transaction only on the success path;
        # the outer finally below releases (rolls back) it otherwise
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
259
259
260 # list of steps to perform discovery before push
260 # list of steps to perform discovery before push
261 pushdiscoveryorder = []
261 pushdiscoveryorder = []
262
262
263 # Mapping between step name and function
263 # Mapping between step name and function
264 #
264 #
265 # This exists to help extensions wrap steps if necessary
265 # This exists to help extensions wrap steps if necessary
266 pushdiscoverymapping = {}
266 pushdiscoverymapping = {}
267
267
268 def pushdiscovery(stepname):
268 def pushdiscovery(stepname):
269 """decorator for function performing discovery before push
269 """decorator for function performing discovery before push
270
270
271 The function is added to the step -> function mapping and appended to the
271 The function is added to the step -> function mapping and appended to the
272 list of steps. Beware that decorated function will be added in order (this
272 list of steps. Beware that decorated function will be added in order (this
273 may matter).
273 may matter).
274
274
275 You can only use this decorator for a new step, if you want to wrap a step
275 You can only use this decorator for a new step, if you want to wrap a step
276 from an extension, change the pushdiscovery dictionary directly."""
276 from an extension, change the pushdiscovery dictionary directly."""
277 def dec(func):
277 def dec(func):
278 assert stepname not in pushdiscoverymapping
278 assert stepname not in pushdiscoverymapping
279 pushdiscoverymapping[stepname] = func
279 pushdiscoverymapping[stepname] = func
280 pushdiscoveryorder.append(stepname)
280 pushdiscoveryorder.append(stepname)
281 return func
281 return func
282 return dec
282 return dec
283
283
284 def _pushdiscovery(pushop):
284 def _pushdiscovery(pushop):
285 """Run all discovery steps"""
285 """Run all discovery steps"""
286 for stepname in pushdiscoveryorder:
286 for stepname in pushdiscoveryorder:
287 step = pushdiscoverymapping[stepname]
287 step = pushdiscoverymapping[stepname]
288 step(pushop)
288 step(pushop)
289
289
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # first figure out what we have in common with (and incoming from) the
    # remote, then derive the outgoing set from that
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
302
302
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    # pheads/droots per phases.analyzeremotephases — presumably remote public
    # heads and remote draft roots; confirm against the phases module
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # on a non-publishing remote only already-public csets are outdated
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        # no changesets pushed: success and failure cases coincide
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
351
351
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect obsolescence markers relevant to the pushed changesets"""
    repo = pushop.repo
    # exchange must be enabled, we must have markers, and the remote must
    # advertise obsolescence support
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
362
362
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """classify local vs remote bookmarks and fill pushop.outbookmarks

    Each outbookmarks entry is a (name, remote-id, local-id) triplet; an
    empty remote-id means the bookmark is being added and an empty local-id
    means it is being deleted on the remote.  Explicitly requested bookmarks
    unknown on both sides set bkresult to 2.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only move bookmarks that point into the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks named on the command line; names are removed as handled
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push if within the pushed subset
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
413
413
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets; return True if there is work to do

    Aborts (unless --force) when the push would propagate obsolete or
    troubled changesets, and runs the standard remote-head checks.
    """
    out = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not out.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, out.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            obsmsg = _("push includes obsolete changeset: %s!")
            troublemsgs = {
                "unstable": _("push includes unstable changeset: %s!"),
                "bumped": _("push includes bumped changeset: %s!"),
                "divergent": _("push includes divergent changeset: %s!"),
            }
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for headnode in out.missingheads:
                change = unfi[headnode]
                if change.obsolete():
                    raise util.Abort(obsmsg % change)
                elif change.troubled():
                    raise util.Abort(troublemsgs[change.troubles()[0]] % change)

    # internal config: bookmarks.pushing
    pushingbms = pushop.ui.configlist('bookmarks', 'pushing')
    discovery.checkheads(unfi, pushop.remote, out,
                         pushop.remoteheads,
                         pushop.newbranch,
                         bool(pushop.incoming),
                         pushingbms)
    return True
450
450
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
458
458
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def dec(func):
        # refuse to register the same step twice; wrapping an existing step
        # must go through b2partsgenmapping instead
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
477
477
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version both sides understand
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
518
518
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, so replies/failures can be matched back to
    # the head whose phase was being updated
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
559
559
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle2 push (if supported)"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format understood by both sides; leave the step to a
        # possible fallback mechanism
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)
571
571
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples for reply/failure handling
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
623
623
624
624
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise util.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise util.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate to the failure callback registered for that part
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
667
667
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
716
716
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
772
772
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
789
789
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
808
808
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
830
830
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
888
888
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # created lazily by transaction()
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
918
918
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None):
    """pull changesets (and phases/bookmarks/obsmarkers) from ``remote``

    Runs discovery, then the bundle2 or legacy pull paths inside a single
    repo lock and transaction.  Returns the ``pulloperation`` object.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           **opargs)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # the individual steps check pullop.stepsdone, so the legacy calls
        # below are no-ops for anything bundle2 already handled
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
948
948
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
956
956
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        # registering the same step twice is a programming error
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
972
972
def _pulldiscovery(pullop):
    """Run all registered discovery steps, in registration order"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)
978
978
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if (_canusebundle2(pullop)
        and 'listkeys' in bundle2.bundle2caps(pullop.remote)):
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
993
993
994
994
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it would end up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not included in a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally: common, even if discovery missed it
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head turned out to be known: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1032
1032
def _pullbundle2(pullop):
    """pull data using bundle2

    Requests a single bundle2 from the remote carrying the changegroup and,
    when supported, phase/bookmark listkeys and obsolescence markers, then
    processes it. Marks the corresponding steps done so the legacy pull
    steps are skipped afterwards."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1086
1086
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    ``kwargs`` is the argument dict about to be passed to
    ``remote.getbundle``; extensions may mutate it in place."""
    pass
1090
1090
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the most capable protocol the remote speaks
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1124
1124
def _pullphase(pullop):
    """pull phase information from the remote and apply it locally

    Skipped when a bundle2 exchange already handled phases."""
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)
1131
1131
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the listkeys-style mapping received from the remote.
    Only advances local phase boundaries (public/draft); never retreats."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1166
1166
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1178
1178
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            # only open a transaction once we know there is data to merge
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1202
1202
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle

    The bundle2 capability blob is URL-quoted so it survives transport as a
    plain ``bundlecaps`` token."""
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps
1209
1209
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1217
1217
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def dec(func):
        # registering the same step twice is a programming error
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            # allow callers to control where their part is generated
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
1236
1236
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            # bundle10 cannot carry anything besides the changegroup
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # let each registered part generator contribute to the bundle, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1283
1283
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # negotiate the highest changegroup version both sides know
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1309
1309
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        # one part per requested pushkey namespace
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1320
1320
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # only markers relevant to ancestors of the requested heads
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)
1332
1332
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1375
1375
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    headsdigest = util.sha1(''.join(sorted(currentheads))).digest()
    # A client's view of the heads is acceptable in three forms: an explicit
    # force marker, the literal head list, or a hash of the sorted heads.
    if their_heads == ['force']:
        return
    if their_heads == currentheads:
        return
    if their_heads == ['hashed', headsdigest]:
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1389
1389
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    wlock = lock = tr = None
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        # Always capture output for HTTP(S) peers so it can be relayed back
        # to the client inside the bundle2 reply.
        captureoutput = True
    try:
        # Abort with PushRaced if the repo heads changed since the client
        # built the bundle.
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        # bundle2 streams expose a 'params' attribute; legacy changegroups
        # do not, which is how the two code paths are distinguished here.
        if util.safehasattr(cg, 'params'):
            r = None
            try:
                wlock = repo.wlock()
                lock = repo.lock()
                tr = repo.transaction(source)
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                # The transaction is handed over lazily via a callable so the
                # bundle operation controls when it is actually used.
                op = bundle2.bundleoperation(repo, lambda: tr,
                                            captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # Grab the reply even on failure; any output produced
                    # from here on is appended to it as 'output' parts.
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                tr.close()
            except BaseException as exc:
                # Tag the exception so upper layers know it was raised while
                # processing a bundle2 stream (error reporting differs).
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # Salvage already-built reply parts so captured output
                    # can still be transported back despite the failure.
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # Legacy (non-bundle2) path: plain changegroup application.
            lock = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        # Release in reverse acquisition order: transaction, lock, wlock.
        lockmod.release(tr, lock, wlock)
        if recordout is not None:
            # Flush whatever was buffered into the reply/salvaged parts.
            recordout(repo.ui.popbuffer())
    return r
1447
1447
1448 # This is it's own function so extensions can override it.
1448 # This is it's own function so extensions can override it.
1449 def _walkstreamfiles(repo):
1449 def _walkstreamfiles(repo):
1450 return repo.store.walk()
1450 return repo.store.walk()
1451
1451
def generatestreamclone(repo):
    """Emit content for a streaming clone.

    This is a generator of raw chunks that constitute a streaming clone.

    The stream begins with a line of 2 space-delimited integers containing the
    number of entries and total bytes size.

    Next, are N entries for each file being transferred. Each file entry starts
    as a line with the file name and integer size delimited by a null byte.
    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            # Skip empty files; they contribute no data to the stream.
            if size:
                entries.append((name, size))
                total_bytes += size
    finally:
        lock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    # Stream header: "<entry count> <total bytes>\n".
    yield '%d %d\n' % (len(entries), total_bytes)

    # NOTE(review): the lock is released before the file data below is read,
    # so only the file *list* is a locked snapshot — presumably safe because
    # store files are append-only; confirm before changing this ordering.
    svfs = repo.svfs
    oldaudit = svfs.mustaudit
    debugflag = repo.ui.debugflag
    # Disable path auditing while streaming: the names come straight from
    # the store walk above, so re-auditing each open would be wasted work.
    svfs.mustaudit = False

    try:
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            if size <= 65536:
                # Small file: a single read, closing the handle promptly.
                fp = svfs(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                # Large file: stream fixed-size chunks to bound memory use.
                for chunk in util.filechunkiter(svfs(name), limit=size):
                    yield chunk
    finally:
        # Restore auditing even if the consumer abandons the generator.
        svfs.mustaudit = oldaudit
1512
1512
def consumestreamclone(repo, fp):
    """Apply the contents from a streaming clone file.

    This takes the output from "streamout" and applies it to the specified
    repository.

    Like "streamout," the status line added by the wire protocol is not handled
    by this function.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        # Stream header: "<file count> <total bytes>\n".
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # Per-file header: "<name>\0<size>\n", then <size> raw bytes.
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                # Copy exactly `size` bytes from the stream, updating the
                # progress bar per chunk.
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        # Clamp to a small positive value to avoid division by zero in the
        # transfer-rate report below.
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now