##// END OF EJS Templates
pull: only prefetch bookmarks when using bundle1...
Pierre-Yves David -
r25369:02defdb1 default
parent child Browse files
Show More
@@ -1,1477 +1,1486
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import time
8 import time
9 from i18n import _
9 from i18n import _
10 from node import hex, nullid
10 from node import hex, nullid
11 import errno, urllib
11 import errno, urllib
12 import util, scmutil, changegroup, base85, error, store
12 import util, scmutil, changegroup, base85, error, store
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
14 import lock as lockmod
14 import lock as lockmod
15
15
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle stream ``fh``.

    ``fname`` is only used for error messages; an empty name is shown
    as "stream".  When ``vfs`` is provided, ``fname`` is joined to it
    for display.  Understands HG10 (changegroup v1, with an optional
    headerless fixup path) and HG2* (bundle2) streams; aborts on
    anything else.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # headerless bundle: push back the bytes we already consumed
            # and treat the stream as uncompressed changegroup v1
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression marker follows the 4-byte header
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
41
41
def buildobsmarkerspart(bundler, markers):
    """Append an 'obsmarkers' part carrying ``markers`` to ``bundler``.

    Returns the new part, or None when ``markers`` is empty (no part is
    created in that case).  Raises ValueError when we share no obsmarker
    format with the bundler's advertised capabilities.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
56
56
57 def _canusebundle2(op):
57 def _canusebundle2(op):
58 """return true if a pull/push can use bundle2
58 """return true if a pull/push can use bundle2
59
59
60 Feel free to nuke this function when we drop the experimental option"""
60 Feel free to nuke this function when we drop the experimental option"""
61 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
61 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
62 and op.remote.capable('bundle2'))
62 and op.remote.capable('bundle2'))
63
63
64
64
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed (names given on the command line)
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?  None until the attempt is made.
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, as (name, old-remote-id, new-id) triples
        self.outbookmarks = []
        # transaction manager (set once a local lock is held)
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of (success, failure) messages used when pushing a bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
173
173
174
174
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # local-to-local push: refuse early if the destination repo lacks
        # a requirement of the source (it could not read what we write)
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # transaction is only opened on demand by the steps below;
            # the manager ties it to the remote URL for hook reporting
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repo ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # legacy steps; per pushop.stepsdone, steps already handled by
            # bundle2 above are expected to be skipped by these calls
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # release in reverse order of acquisition
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
255
255
# Ordered list of step names for discovery to perform before push.
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
263
263
def pushdiscovery(stepname):
    """Decorator registering a pre-push discovery step as ``stepname``.

    The decorated function goes into the step -> function mapping and
    its name is appended to the ordered list of steps, so decoration
    order may matter.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, change the pushdiscovery dictionary directly.
    """
    def register(func):
        # a step name must be unique; wrapping goes through the mapping
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func
    return register
279
279
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
285
285
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Discover the changesets that need to be pushed.

    Stores the results on pushop: ``outgoing`` (discovery.outgoing
    object), ``remoteheads`` and ``incoming``.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
298
298
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)

    Fills in ``pushop.outdatedphases`` and
    ``pushop.fallbackoutdatedphases``.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing server: only already-public heads are interesting
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
347
347
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Select the obsolescence markers relevant to the pushed changesets.

    Does nothing unless exchange of markers is enabled, the local
    obsstore is non-empty and the remote advertises the 'obsolete'
    namespace.
    """
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    repo = pushop.repo
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repos.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
358
358
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and decide which to push.

    Fills ``pushop.outbookmarks`` with (name, old-remote-id, new-id)
    triples and sets ``pushop.bkresult`` to 2 when an explicitly
    requested bookmark exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to the pushed subset of history
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # names explicitly requested on the command line; each category
    # below consumes the names it accounts for
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # fast-forwardable bookmarks: pushed even when not explicit, as long
    # as the target is within the pushed revisions
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmarks (only pushed when explicitly requested)
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks (only when explicitly requested)
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete (remote-only, explicitly requested)
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
409
409
def _pushcheckoutgoing(pushop):
    """Validate outgoing changesets before they are bundled.

    Returns False when there is nothing to push.  Unless the push is
    forced, aborts when an outgoing head is obsolete or troubled, and
    runs the standard new-head checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if pushop.force:
        return True
    # if repo.obsstore == False --> no obsolete
    # then, save the iteration
    if unfi.obsstore:
        # these messages are bound to short names for 80-char-limit reasons
        mso = _("push includes obsolete changeset: %s!")
        mst = {"unstable": _("push includes unstable changeset: %s!"),
               "bumped": _("push includes bumped changeset: %s!"),
               "divergent": _("push includes divergent changeset: %s!")}
        # If there is at least one obsolete or unstable changeset in
        # missing, at least one of the missing heads will be obsolete or
        # unstable, so checking heads only is ok.
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise util.Abort(mso % ctx)
            if ctx.troubled():
                raise util.Abort(mst[ctx.troubles()[0]] % ctx)
    newbm = pushop.ui.configlist('bookmarks', 'pushing')
    discovery.checkheads(unfi, pushop.remote, outgoing,
                         pushop.remoteheads,
                         pushop.newbranch,
                         bool(pushop.incoming),
                         newbm)
    return True
444
444
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
452
452
def b2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator as ``stepname``.

    The decorated function is stored in the step -> function mapping.
    Its name is appended to the ordered list of steps, or inserted at
    position ``idx`` when one is given, so decoration order may matter.

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly.
    """
    def register(func):
        # a step name must be unique; wrapping goes through the mapping
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
471
471
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    Adds a 'changegroup' part (and, unless forced, a 'check:heads' part for
    push-race detection) to ``bundler``.

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        # another mechanism (e.g. an extension) already pushed changesets
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        # nothing outgoing: no changegroup part to add
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # server aborts if its heads changed since discovery (push race)
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        # old servers: send an unversioned changegroup
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version both sides support
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
512
512
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One 'pushkey' part is queued per remote head that must be turned public.
    The returned reply handler warns about any head the server refused (or
    ignored) when asked to move it from draft to public.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic membership test, consistent with _pushb2bookmarks
    if 'pushkey' not in b2caps:
        # remote cannot handle pushkey parts; fall back to plain pushkey
        return
    pushop.stepsdone.add('phases')
    # remember which part carries which node so the reply can be matched up
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """warn about every phase update the server did not apply"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            # at most one reply per pushkey part
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
544
544
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the outgoing bundle2"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no obsmarker format is understood by both sides
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
556
556
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One 'pushkey' part is queued per outgoing bookmark change. The returned
    reply handler reports each update's outcome and sets ``pushop.bkresult``.

    (Docstring fixed: it previously said "phase push", copy-pasted from
    _pushb2phases.)
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        # remote cannot handle pushkey parts; fall back to plain pushkey
        return
    pushop.stepsdone.add('bookmarks')
    # remember (part id, bookmark, action) to match server replies
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old -> creation, empty new -> deletion, otherwise move
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        """report per-bookmark outcomes using bookmsgmap messages"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            # at most one reply per pushkey part
            assert len(results) <= 1
            if not results:
                # use the local `ui` alias consistently (was pushop.ui)
                ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
599
599
600
600
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback: allow the server to send a bundle back (needs a transaction)
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator; each may return a callable that
    # will post-process the server's reply bundle
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        # remote advertised bundle2 but rejected a part we sent
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        # the reply bundle contained a part we cannot process
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
637
637
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path: builds a changegroup locally and sends it
    via the remote's ``unbundle`` (preferred) or ``addchangegroup`` command.
    Stores the result in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        # already handled (e.g. by the bundle2 path)
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
686
686
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote's phase data locally (moving common heads to
    public/draft as appropriate), then pushes outdated phase roots to the
    remote through individual ``pushkey`` calls (fallback for non-bundle2).
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common is public
            _localphasemove(pushop, cheads)
        else: # publish = False
            # match the remote's public heads, keep the rest draft
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            # nothing was pushed; use the pre-push fallback computation
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
742
742
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # a transaction is available, the phase boundary can really move
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
759
759
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    outcomes = []
    for key in sorted(remotedata, reverse=True):
        # reverse sort to ensure we end with dump0
        outcomes.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(outcomes):
        # at least one pushkey call was refused by the remote
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
778
778
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # changegroup push failed, or bookmarks already handled (bundle2)
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # empty old -> creation, empty new -> deletion, otherwise move
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
800
800
class pulloperation(object):
    """State holder for a single pull operation.

    Carries pull-related state and the most common helper operations.
    A fresh instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of changesets common to local and remote before the pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # steps already performed
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is None:
            # We pulled everything possible: sync on everything common,
            # plus any remote head not already in the common set.
            subset = list(self.common)
            known = set(self.common)
            for node in self.rheads:
                if node not in known:
                    subset.append(node)
            return subset
        # We pulled a specific subset: sync on that subset only.
        return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
857
857
class transactionmanager(object):
    """Manage the life cycle of a pull/push transaction.

    The transaction is created lazily on first request; the appropriate
    hooks fire when it is closed."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            # expose origin information to transaction hooks
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
887
887
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and related data) from ``remote`` into ``repo``

    ``heads`` limits the pull to the given remote heads (None means all),
    ``force`` forces the pull even for unrelated repositories, and
    ``bookmarks`` lists bookmarks to pull explicitly.

    Returns the ``pulloperation`` object describing the operation.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # local-to-local pull: the destination must understand every
        # requirement of the source
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # bookmark prefetch for bundle1 happens in _pulldiscovery (see
    # _pullbookmarkbundle1) so it runs before changeset discovery
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # each step below is a no-op if already performed (stepsdone)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release the transaction before the lock
        pullop.trmanager.release()
        lock.release()

    return pullop
915
914
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator registering a pre-pull discovery step under ``stepname``

    The decorated function is stored in the step -> function mapping and the
    step name is appended to the ordered step list. Beware that decorated
    functions will therefore run in registration order (this may matter).

    You can only use this decorator for a new step; to wrap a step from an
    extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # a step may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
939
938
def _pulldiscovery(pullop):
    """Execute every registered discovery step, in registration order."""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
945
944
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """prefetch bookmark data when the pull will use bundle1

    Outside of bundle2, bookmarks must be fetched before changeset
    discovery to reduce the chance and impact of race conditions."""
    if _canusebundle2(pullop):
        # all bundle2 servers support listkeys; the bundle2 path fetches
        # bookmarks as part of the bundle itself, so nothing to do here
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point.

    Stores the discovery result on ``pullop`` as ``common``, ``fetch`` and
    ``rheads``."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    # nodemap of the *unfiltered* repo: lets us recognize remote heads that
    # exist locally but are hidden by the current filter
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # head is known locally (possibly hidden): count it as
                # common unless discovery already recorded it
                if n not in scommon:
                    common.append(n)
            else:
                # genuinely unknown head, keep it in the fetch set
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head was locally known: nothing left to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        # let the server ship phases and bookmarks inside the same bundle
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
        if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
            remoteversions = bundle2.obsmarkersversion(remotecaps)
            # only request markers if we share a marker format version
            if obsolete.commonversion(remoteversions) is not None:
                kwargs['obsmarkers'] = True
                pullop.stepsdone.add('obsmarkers')
    # give extensions a chance to tweak the getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
    # _pullbookmarks is called unconditionally: pullop.remotebookmarks may
    # also have been prefetched by the bundle1 discovery step
    _pullbookmarks(pullop)
1031 def _pullbundle2extraprepare(pullop, kwargs):
1040 def _pullbundle2extraprepare(pullop, kwargs):
1032 """hook function so that extensions can extend the getbundle call"""
1041 """hook function so that extensions can extend the getbundle call"""
1033 pass
1042 pass
1034
1043
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        # another step (typically the bundle2 path) already handled it
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the newest wire command the remote supports, falling back
    # progressively to older protocols
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
def _pullphase(pullop):
    """fetch phase information from the remote and apply it locally

    Skipped entirely when a previous step (e.g. bundle2) already handled
    phases."""
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the raw listkeys mapping from the remote 'phases'
    namespace."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    # work on the unfiltered repo so hidden changesets are considered too
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
def _pullbookmarks(pullop):
    """update the local bookmarks from the remote bookmark information

    Uses whatever ``pullop.remotebookmarks`` a previous step stored (bundle2
    listkeys reply or the bundle1 prefetch)."""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    bookmod.updatefromremote(localrepo.ui,
                             localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is the first chunk of the pushkey-encoded marker stream;
        # its presence means the remote actually has markers to merge
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    # each dump* value is base85-encoded binary marker data
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            # new markers can change changeset visibility; drop caches
            pullop.repo.invalidatevolatilesets()
    return tr
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle

    The set advertises bundle2 support ('HG20') plus this repo's URL-quoted
    bundle2 capability blob."""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# step name -> part-generating function
#
# Kept separate from the order list so extensions can wrap individual steps.
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded under ``stepname`` and scheduled
    either at the end of the step order or, when ``idx`` is given, at that
    position. Decoration order therefore matters.

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # refuse duplicate registration under the same name
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        # any 'HG2*' capability advertised by the client selects bundle2
        usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        # bundle10 can only carry a changegroup; reject requests that ask
        # for anything else
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    # decode the client's bundle2 capability blob ('bundle2=<urlquoted>')
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # let each registered step append its part(s) to the bundle
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        if not cgversions: # 3.1 and 3.2 ship with an empty value
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # negotiate: keep only versions we can produce, pick the newest
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            # only advertise the version when one was explicitly negotiated
            part.addparam('version', version)
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add one 'listkeys' part per requested pushkey namespace"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the requested heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    relevant = repo.obsstore.relevantmarkers(subset)
    buildobsmarkerspart(bundler, sorted(relevant))
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    headsdigest = util.sha1(''.join(sorted(currentheads))).digest()
    if their_heads == ['force']:
        return
    if their_heads == currentheads:
        return
    if their_heads == ['hashed', headsdigest]:
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    wlock = lock = tr = None
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:'):
        # ssh peer: always capture output so it can be relayed
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # having a 'params' attribute marks this as a bundle2 stream
            r = None
            try:
                wlock = repo.wlock()
                lock = repo.lock()
                tr = repo.transaction(source)
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                op = bundle2.bundleoperation(repo, lambda: tr,
                                             captureoutput=captureoutput)
                try:
                    r = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # keep the reply bundle even if processing failed
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                tr.close()
            except BaseException, exc:
                # annotate the exception so callers know bundle2 was involved
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage any output parts so the client still sees them
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # plain bundle1 changegroup: a simple lock is enough
            lock = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        lockmod.release(tr, lock, wlock)
        if recordout is not None:
            # flush captured output after locks are released
            recordout(repo.ui.popbuffer())
    return r
1351 # This is it's own function so extensions can override it.
1360 # This is it's own function so extensions can override it.
1352 def _walkstreamfiles(repo):
1361 def _walkstreamfiles(repo):
1353 return repo.store.walk()
1362 return repo.store.walk()
1354
1363
def generatestreamclone(repo):
    """Emit content for a streaming clone.

    This is a generator of raw chunks that constitute a streaming clone.

    The stream begins with a line of 2 space-delimited integers containing the
    number of entries and total bytes size.

    Next, are N entries for each file being transferred. Each file entry starts
    as a line with the file name and integer size delimited by a null byte.
    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    # Only non-empty files are recorded; zero-length store files are skipped.
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size
    finally:
        lock.release()

    # Header line: "<number of entries> <total byte size>".
    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    yield '%d %d\n' % (len(entries), total_bytes)

    # Path auditing is disabled while the store files are read back out and
    # restored in the finally below. NOTE(review): presumably this is to
    # avoid per-file audit overhead while streaming — confirm.
    sopener = repo.svfs
    oldaudit = sopener.mustaudit
    debugflag = repo.ui.debugflag
    sopener.mustaudit = False

    try:
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            if size <= 65536:
                # Small file: read it in one go and close promptly.
                fp = sopener(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                # Large file: stream it in chunks bounded by ``size``.
                for chunk in util.filechunkiter(sopener(name), limit=size):
                    yield chunk
    finally:
        sopener.mustaudit = oldaudit
1415
1424
def consumestreamclone(repo, fp):
    """Read a streaming clone body from ``fp`` and write it into ``repo``.

    The expected input is what "streamout" produces: a header line with the
    file count and total byte size, followed by per-file entries (name,
    NUL byte, size, then the raw data). The wire protocol's leading status
    line must already have been consumed by the caller.

    Raises error.ResponseError when the header or a file entry line cannot
    be parsed. Holds the repository lock for the whole operation.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))

        # Parse the "<files> <bytes>" header line.
        header = fp.readline()
        try:
            total_files, total_bytes = [int(field)
                                        for field in header.split(' ', 1)]
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), header)

        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        consumed = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        begin = time.time()

        txn = repo.transaction(_('clone'))
        try:
            for fileno in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                entry = fp.readline()
                try:
                    name, size = entry.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), entry)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                dest = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    consumed += len(chunk)
                    repo.ui.progress(_('clone'), consumed,
                                     total=total_bytes)
                    dest.write(chunk)
                dest.close()
            txn.close()
        finally:
            txn.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - begin
        if elapsed <= 0:
            # Guard against a zero/negative clock delta before dividing.
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now