##// END OF EJS Templates
pull: skip pulling remote bookmarks with bundle2 if a value already exists...
Pierre-Yves David -
r25444:1d1fd5d4 default
parent child Browse files
Show More
@@ -1,1532 +1,1539 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import time
8 import time
9 from i18n import _
9 from i18n import _
10 from node import hex, nullid
10 from node import hex, nullid
11 import errno, urllib
11 import errno, urllib
12 import util, scmutil, changegroup, base85, error, store
12 import util, scmutil, changegroup, base85, error, store
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
14 import lock as lockmod
14 import lock as lockmod
15 import tags
15 import tags
16
16
17 def readbundle(ui, fh, fname, vfs=None):
17 def readbundle(ui, fh, fname, vfs=None):
18 header = changegroup.readexactly(fh, 4)
18 header = changegroup.readexactly(fh, 4)
19
19
20 alg = None
20 alg = None
21 if not fname:
21 if not fname:
22 fname = "stream"
22 fname = "stream"
23 if not header.startswith('HG') and header.startswith('\0'):
23 if not header.startswith('HG') and header.startswith('\0'):
24 fh = changegroup.headerlessfixup(fh, header)
24 fh = changegroup.headerlessfixup(fh, header)
25 header = "HG10"
25 header = "HG10"
26 alg = 'UN'
26 alg = 'UN'
27 elif vfs:
27 elif vfs:
28 fname = vfs.join(fname)
28 fname = vfs.join(fname)
29
29
30 magic, version = header[0:2], header[2:4]
30 magic, version = header[0:2], header[2:4]
31
31
32 if magic != 'HG':
32 if magic != 'HG':
33 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
33 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
34 if version == '10':
34 if version == '10':
35 if alg is None:
35 if alg is None:
36 alg = changegroup.readexactly(fh, 2)
36 alg = changegroup.readexactly(fh, 2)
37 return changegroup.cg1unpacker(fh, alg)
37 return changegroup.cg1unpacker(fh, alg)
38 elif version.startswith('2'):
38 elif version.startswith('2'):
39 return bundle2.getunbundler(ui, fh, header=magic + version)
39 return bundle2.getunbundler(ui, fh, header=magic + version)
40 else:
40 else:
41 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
41 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
42
42
43 def buildobsmarkerspart(bundler, markers):
43 def buildobsmarkerspart(bundler, markers):
44 """add an obsmarker part to the bundler with <markers>
44 """add an obsmarker part to the bundler with <markers>
45
45
46 No part is created if markers is empty.
46 No part is created if markers is empty.
47 Raises ValueError if the bundler doesn't support any known obsmarker format.
47 Raises ValueError if the bundler doesn't support any known obsmarker format.
48 """
48 """
49 if markers:
49 if markers:
50 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
50 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
51 version = obsolete.commonversion(remoteversions)
51 version = obsolete.commonversion(remoteversions)
52 if version is None:
52 if version is None:
53 raise ValueError('bundler do not support common obsmarker format')
53 raise ValueError('bundler do not support common obsmarker format')
54 stream = obsolete.encodemarkers(markers, True, version=version)
54 stream = obsolete.encodemarkers(markers, True, version=version)
55 return bundler.newpart('obsmarkers', data=stream)
55 return bundler.newpart('obsmarkers', data=stream)
56 return None
56 return None
57
57
58 def _canusebundle2(op):
58 def _canusebundle2(op):
59 """return true if a pull/push can use bundle2
59 """return true if a pull/push can use bundle2
60
60
61 Feel free to nuke this function when we drop the experimental option"""
61 Feel free to nuke this function when we drop the experimental option"""
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
63 and op.remote.capable('bundle2'))
63 and op.remote.capable('bundle2'))
64
64
65
65
66 class pushoperation(object):
66 class pushoperation(object):
67 """A object that represent a single push operation
67 """A object that represent a single push operation
68
68
69 It purpose is to carry push related state and very common operation.
69 It purpose is to carry push related state and very common operation.
70
70
71 A new should be created at the beginning of each push and discarded
71 A new should be created at the beginning of each push and discarded
72 afterward.
72 afterward.
73 """
73 """
74
74
75 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
75 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
76 bookmarks=()):
76 bookmarks=()):
77 # repo we push from
77 # repo we push from
78 self.repo = repo
78 self.repo = repo
79 self.ui = repo.ui
79 self.ui = repo.ui
80 # repo we push to
80 # repo we push to
81 self.remote = remote
81 self.remote = remote
82 # force option provided
82 # force option provided
83 self.force = force
83 self.force = force
84 # revs to be pushed (None is "all")
84 # revs to be pushed (None is "all")
85 self.revs = revs
85 self.revs = revs
86 # bookmark explicitly pushed
86 # bookmark explicitly pushed
87 self.bookmarks = bookmarks
87 self.bookmarks = bookmarks
88 # allow push of new branch
88 # allow push of new branch
89 self.newbranch = newbranch
89 self.newbranch = newbranch
90 # did a local lock get acquired?
90 # did a local lock get acquired?
91 self.locallocked = None
91 self.locallocked = None
92 # step already performed
92 # step already performed
93 # (used to check what steps have been already performed through bundle2)
93 # (used to check what steps have been already performed through bundle2)
94 self.stepsdone = set()
94 self.stepsdone = set()
95 # Integer version of the changegroup push result
95 # Integer version of the changegroup push result
96 # - None means nothing to push
96 # - None means nothing to push
97 # - 0 means HTTP error
97 # - 0 means HTTP error
98 # - 1 means we pushed and remote head count is unchanged *or*
98 # - 1 means we pushed and remote head count is unchanged *or*
99 # we have outgoing changesets but refused to push
99 # we have outgoing changesets but refused to push
100 # - other values as described by addchangegroup()
100 # - other values as described by addchangegroup()
101 self.cgresult = None
101 self.cgresult = None
102 # Boolean value for the bookmark push
102 # Boolean value for the bookmark push
103 self.bkresult = None
103 self.bkresult = None
104 # discover.outgoing object (contains common and outgoing data)
104 # discover.outgoing object (contains common and outgoing data)
105 self.outgoing = None
105 self.outgoing = None
106 # all remote heads before the push
106 # all remote heads before the push
107 self.remoteheads = None
107 self.remoteheads = None
108 # testable as a boolean indicating if any nodes are missing locally.
108 # testable as a boolean indicating if any nodes are missing locally.
109 self.incoming = None
109 self.incoming = None
110 # phases changes that must be pushed along side the changesets
110 # phases changes that must be pushed along side the changesets
111 self.outdatedphases = None
111 self.outdatedphases = None
112 # phases changes that must be pushed if changeset push fails
112 # phases changes that must be pushed if changeset push fails
113 self.fallbackoutdatedphases = None
113 self.fallbackoutdatedphases = None
114 # outgoing obsmarkers
114 # outgoing obsmarkers
115 self.outobsmarkers = set()
115 self.outobsmarkers = set()
116 # outgoing bookmarks
116 # outgoing bookmarks
117 self.outbookmarks = []
117 self.outbookmarks = []
118 # transaction manager
118 # transaction manager
119 self.trmanager = None
119 self.trmanager = None
120
120
121 @util.propertycache
121 @util.propertycache
122 def futureheads(self):
122 def futureheads(self):
123 """future remote heads if the changeset push succeeds"""
123 """future remote heads if the changeset push succeeds"""
124 return self.outgoing.missingheads
124 return self.outgoing.missingheads
125
125
126 @util.propertycache
126 @util.propertycache
127 def fallbackheads(self):
127 def fallbackheads(self):
128 """future remote heads if the changeset push fails"""
128 """future remote heads if the changeset push fails"""
129 if self.revs is None:
129 if self.revs is None:
130 # not target to push, all common are relevant
130 # not target to push, all common are relevant
131 return self.outgoing.commonheads
131 return self.outgoing.commonheads
132 unfi = self.repo.unfiltered()
132 unfi = self.repo.unfiltered()
133 # I want cheads = heads(::missingheads and ::commonheads)
133 # I want cheads = heads(::missingheads and ::commonheads)
134 # (missingheads is revs with secret changeset filtered out)
134 # (missingheads is revs with secret changeset filtered out)
135 #
135 #
136 # This can be expressed as:
136 # This can be expressed as:
137 # cheads = ( (missingheads and ::commonheads)
137 # cheads = ( (missingheads and ::commonheads)
138 # + (commonheads and ::missingheads))"
138 # + (commonheads and ::missingheads))"
139 # )
139 # )
140 #
140 #
141 # while trying to push we already computed the following:
141 # while trying to push we already computed the following:
142 # common = (::commonheads)
142 # common = (::commonheads)
143 # missing = ((commonheads::missingheads) - commonheads)
143 # missing = ((commonheads::missingheads) - commonheads)
144 #
144 #
145 # We can pick:
145 # We can pick:
146 # * missingheads part of common (::commonheads)
146 # * missingheads part of common (::commonheads)
147 common = set(self.outgoing.common)
147 common = set(self.outgoing.common)
148 nm = self.repo.changelog.nodemap
148 nm = self.repo.changelog.nodemap
149 cheads = [node for node in self.revs if nm[node] in common]
149 cheads = [node for node in self.revs if nm[node] in common]
150 # and
150 # and
151 # * commonheads parents on missing
151 # * commonheads parents on missing
152 revset = unfi.set('%ln and parents(roots(%ln))',
152 revset = unfi.set('%ln and parents(roots(%ln))',
153 self.outgoing.commonheads,
153 self.outgoing.commonheads,
154 self.outgoing.missing)
154 self.outgoing.missing)
155 cheads.extend(c.node() for c in revset)
155 cheads.extend(c.node() for c in revset)
156 return cheads
156 return cheads
157
157
158 @property
158 @property
159 def commonheads(self):
159 def commonheads(self):
160 """set of all common heads after changeset bundle push"""
160 """set of all common heads after changeset bundle push"""
161 if self.cgresult:
161 if self.cgresult:
162 return self.futureheads
162 return self.futureheads
163 else:
163 else:
164 return self.fallbackheads
164 return self.fallbackheads
165
165
166 # mapping of message used when pushing bookmark
166 # mapping of message used when pushing bookmark
167 bookmsgmap = {'update': (_("updating bookmark %s\n"),
167 bookmsgmap = {'update': (_("updating bookmark %s\n"),
168 _('updating bookmark %s failed!\n')),
168 _('updating bookmark %s failed!\n')),
169 'export': (_("exporting bookmark %s\n"),
169 'export': (_("exporting bookmark %s\n"),
170 _('exporting bookmark %s failed!\n')),
170 _('exporting bookmark %s failed!\n')),
171 'delete': (_("deleting remote bookmark %s\n"),
171 'delete': (_("deleting remote bookmark %s\n"),
172 _('deleting remote bookmark %s failed!\n')),
172 _('deleting remote bookmark %s failed!\n')),
173 }
173 }
174
174
175
175
176 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
176 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
177 '''Push outgoing changesets (limited by revs) from a local
177 '''Push outgoing changesets (limited by revs) from a local
178 repository to remote. Return an integer:
178 repository to remote. Return an integer:
179 - None means nothing to push
179 - None means nothing to push
180 - 0 means HTTP error
180 - 0 means HTTP error
181 - 1 means we pushed and remote head count is unchanged *or*
181 - 1 means we pushed and remote head count is unchanged *or*
182 we have outgoing changesets but refused to push
182 we have outgoing changesets but refused to push
183 - other values as described by addchangegroup()
183 - other values as described by addchangegroup()
184 '''
184 '''
185 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
185 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
186 if pushop.remote.local():
186 if pushop.remote.local():
187 missing = (set(pushop.repo.requirements)
187 missing = (set(pushop.repo.requirements)
188 - pushop.remote.local().supported)
188 - pushop.remote.local().supported)
189 if missing:
189 if missing:
190 msg = _("required features are not"
190 msg = _("required features are not"
191 " supported in the destination:"
191 " supported in the destination:"
192 " %s") % (', '.join(sorted(missing)))
192 " %s") % (', '.join(sorted(missing)))
193 raise util.Abort(msg)
193 raise util.Abort(msg)
194
194
195 # there are two ways to push to remote repo:
195 # there are two ways to push to remote repo:
196 #
196 #
197 # addchangegroup assumes local user can lock remote
197 # addchangegroup assumes local user can lock remote
198 # repo (local filesystem, old ssh servers).
198 # repo (local filesystem, old ssh servers).
199 #
199 #
200 # unbundle assumes local user cannot lock remote repo (new ssh
200 # unbundle assumes local user cannot lock remote repo (new ssh
201 # servers, http servers).
201 # servers, http servers).
202
202
203 if not pushop.remote.canpush():
203 if not pushop.remote.canpush():
204 raise util.Abort(_("destination does not support push"))
204 raise util.Abort(_("destination does not support push"))
205 # get local lock as we might write phase data
205 # get local lock as we might write phase data
206 localwlock = locallock = None
206 localwlock = locallock = None
207 try:
207 try:
208 # bundle2 push may receive a reply bundle touching bookmarks or other
208 # bundle2 push may receive a reply bundle touching bookmarks or other
209 # things requiring the wlock. Take it now to ensure proper ordering.
209 # things requiring the wlock. Take it now to ensure proper ordering.
210 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
210 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
211 if _canusebundle2(pushop) and maypushback:
211 if _canusebundle2(pushop) and maypushback:
212 localwlock = pushop.repo.wlock()
212 localwlock = pushop.repo.wlock()
213 locallock = pushop.repo.lock()
213 locallock = pushop.repo.lock()
214 pushop.locallocked = True
214 pushop.locallocked = True
215 except IOError, err:
215 except IOError, err:
216 pushop.locallocked = False
216 pushop.locallocked = False
217 if err.errno != errno.EACCES:
217 if err.errno != errno.EACCES:
218 raise
218 raise
219 # source repo cannot be locked.
219 # source repo cannot be locked.
220 # We do not abort the push, but just disable the local phase
220 # We do not abort the push, but just disable the local phase
221 # synchronisation.
221 # synchronisation.
222 msg = 'cannot lock source repository: %s\n' % err
222 msg = 'cannot lock source repository: %s\n' % err
223 pushop.ui.debug(msg)
223 pushop.ui.debug(msg)
224 try:
224 try:
225 if pushop.locallocked:
225 if pushop.locallocked:
226 pushop.trmanager = transactionmanager(repo,
226 pushop.trmanager = transactionmanager(repo,
227 'push-response',
227 'push-response',
228 pushop.remote.url())
228 pushop.remote.url())
229 pushop.repo.checkpush(pushop)
229 pushop.repo.checkpush(pushop)
230 lock = None
230 lock = None
231 unbundle = pushop.remote.capable('unbundle')
231 unbundle = pushop.remote.capable('unbundle')
232 if not unbundle:
232 if not unbundle:
233 lock = pushop.remote.lock()
233 lock = pushop.remote.lock()
234 try:
234 try:
235 _pushdiscovery(pushop)
235 _pushdiscovery(pushop)
236 if _canusebundle2(pushop):
236 if _canusebundle2(pushop):
237 _pushbundle2(pushop)
237 _pushbundle2(pushop)
238 _pushchangeset(pushop)
238 _pushchangeset(pushop)
239 _pushsyncphase(pushop)
239 _pushsyncphase(pushop)
240 _pushobsolete(pushop)
240 _pushobsolete(pushop)
241 _pushbookmark(pushop)
241 _pushbookmark(pushop)
242 finally:
242 finally:
243 if lock is not None:
243 if lock is not None:
244 lock.release()
244 lock.release()
245 if pushop.trmanager:
245 if pushop.trmanager:
246 pushop.trmanager.close()
246 pushop.trmanager.close()
247 finally:
247 finally:
248 if pushop.trmanager:
248 if pushop.trmanager:
249 pushop.trmanager.release()
249 pushop.trmanager.release()
250 if locallock is not None:
250 if locallock is not None:
251 locallock.release()
251 locallock.release()
252 if localwlock is not None:
252 if localwlock is not None:
253 localwlock.release()
253 localwlock.release()
254
254
255 return pushop
255 return pushop
256
256
257 # list of steps to perform discovery before push
257 # list of steps to perform discovery before push
258 pushdiscoveryorder = []
258 pushdiscoveryorder = []
259
259
260 # Mapping between step name and function
260 # Mapping between step name and function
261 #
261 #
262 # This exists to help extensions wrap steps if necessary
262 # This exists to help extensions wrap steps if necessary
263 pushdiscoverymapping = {}
263 pushdiscoverymapping = {}
264
264
265 def pushdiscovery(stepname):
265 def pushdiscovery(stepname):
266 """decorator for function performing discovery before push
266 """decorator for function performing discovery before push
267
267
268 The function is added to the step -> function mapping and appended to the
268 The function is added to the step -> function mapping and appended to the
269 list of steps. Beware that decorated function will be added in order (this
269 list of steps. Beware that decorated function will be added in order (this
270 may matter).
270 may matter).
271
271
272 You can only use this decorator for a new step, if you want to wrap a step
272 You can only use this decorator for a new step, if you want to wrap a step
273 from an extension, change the pushdiscovery dictionary directly."""
273 from an extension, change the pushdiscovery dictionary directly."""
274 def dec(func):
274 def dec(func):
275 assert stepname not in pushdiscoverymapping
275 assert stepname not in pushdiscoverymapping
276 pushdiscoverymapping[stepname] = func
276 pushdiscoverymapping[stepname] = func
277 pushdiscoveryorder.append(stepname)
277 pushdiscoveryorder.append(stepname)
278 return func
278 return func
279 return dec
279 return dec
280
280
281 def _pushdiscovery(pushop):
281 def _pushdiscovery(pushop):
282 """Run all discovery steps"""
282 """Run all discovery steps"""
283 for stepname in pushdiscoveryorder:
283 for stepname in pushdiscoveryorder:
284 step = pushdiscoverymapping[stepname]
284 step = pushdiscoverymapping[stepname]
285 step(pushop)
285 step(pushop)
286
286
287 @pushdiscovery('changeset')
287 @pushdiscovery('changeset')
288 def _pushdiscoverychangeset(pushop):
288 def _pushdiscoverychangeset(pushop):
289 """discover the changeset that need to be pushed"""
289 """discover the changeset that need to be pushed"""
290 fci = discovery.findcommonincoming
290 fci = discovery.findcommonincoming
291 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
291 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
292 common, inc, remoteheads = commoninc
292 common, inc, remoteheads = commoninc
293 fco = discovery.findcommonoutgoing
293 fco = discovery.findcommonoutgoing
294 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
294 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
295 commoninc=commoninc, force=pushop.force)
295 commoninc=commoninc, force=pushop.force)
296 pushop.outgoing = outgoing
296 pushop.outgoing = outgoing
297 pushop.remoteheads = remoteheads
297 pushop.remoteheads = remoteheads
298 pushop.incoming = inc
298 pushop.incoming = inc
299
299
300 @pushdiscovery('phase')
300 @pushdiscovery('phase')
301 def _pushdiscoveryphase(pushop):
301 def _pushdiscoveryphase(pushop):
302 """discover the phase that needs to be pushed
302 """discover the phase that needs to be pushed
303
303
304 (computed for both success and failure case for changesets push)"""
304 (computed for both success and failure case for changesets push)"""
305 outgoing = pushop.outgoing
305 outgoing = pushop.outgoing
306 unfi = pushop.repo.unfiltered()
306 unfi = pushop.repo.unfiltered()
307 remotephases = pushop.remote.listkeys('phases')
307 remotephases = pushop.remote.listkeys('phases')
308 publishing = remotephases.get('publishing', False)
308 publishing = remotephases.get('publishing', False)
309 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
309 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
310 and remotephases # server supports phases
310 and remotephases # server supports phases
311 and not pushop.outgoing.missing # no changesets to be pushed
311 and not pushop.outgoing.missing # no changesets to be pushed
312 and publishing):
312 and publishing):
313 # When:
313 # When:
314 # - this is a subrepo push
314 # - this is a subrepo push
315 # - and remote support phase
315 # - and remote support phase
316 # - and no changeset are to be pushed
316 # - and no changeset are to be pushed
317 # - and remote is publishing
317 # - and remote is publishing
318 # We may be in issue 3871 case!
318 # We may be in issue 3871 case!
319 # We drop the possible phase synchronisation done by
319 # We drop the possible phase synchronisation done by
320 # courtesy to publish changesets possibly locally draft
320 # courtesy to publish changesets possibly locally draft
321 # on the remote.
321 # on the remote.
322 remotephases = {'publishing': 'True'}
322 remotephases = {'publishing': 'True'}
323 ana = phases.analyzeremotephases(pushop.repo,
323 ana = phases.analyzeremotephases(pushop.repo,
324 pushop.fallbackheads,
324 pushop.fallbackheads,
325 remotephases)
325 remotephases)
326 pheads, droots = ana
326 pheads, droots = ana
327 extracond = ''
327 extracond = ''
328 if not publishing:
328 if not publishing:
329 extracond = ' and public()'
329 extracond = ' and public()'
330 revset = 'heads((%%ln::%%ln) %s)' % extracond
330 revset = 'heads((%%ln::%%ln) %s)' % extracond
331 # Get the list of all revs draft on remote by public here.
331 # Get the list of all revs draft on remote by public here.
332 # XXX Beware that revset break if droots is not strictly
332 # XXX Beware that revset break if droots is not strictly
333 # XXX root we may want to ensure it is but it is costly
333 # XXX root we may want to ensure it is but it is costly
334 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
334 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
335 if not outgoing.missing:
335 if not outgoing.missing:
336 future = fallback
336 future = fallback
337 else:
337 else:
338 # adds changeset we are going to push as draft
338 # adds changeset we are going to push as draft
339 #
339 #
340 # should not be necessary for publishing server, but because of an
340 # should not be necessary for publishing server, but because of an
341 # issue fixed in xxxxx we have to do it anyway.
341 # issue fixed in xxxxx we have to do it anyway.
342 fdroots = list(unfi.set('roots(%ln + %ln::)',
342 fdroots = list(unfi.set('roots(%ln + %ln::)',
343 outgoing.missing, droots))
343 outgoing.missing, droots))
344 fdroots = [f.node() for f in fdroots]
344 fdroots = [f.node() for f in fdroots]
345 future = list(unfi.set(revset, fdroots, pushop.futureheads))
345 future = list(unfi.set(revset, fdroots, pushop.futureheads))
346 pushop.outdatedphases = future
346 pushop.outdatedphases = future
347 pushop.fallbackoutdatedphases = fallback
347 pushop.fallbackoutdatedphases = fallback
348
348
349 @pushdiscovery('obsmarker')
349 @pushdiscovery('obsmarker')
350 def _pushdiscoveryobsmarkers(pushop):
350 def _pushdiscoveryobsmarkers(pushop):
351 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
351 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
352 and pushop.repo.obsstore
352 and pushop.repo.obsstore
353 and 'obsolete' in pushop.remote.listkeys('namespaces')):
353 and 'obsolete' in pushop.remote.listkeys('namespaces')):
354 repo = pushop.repo
354 repo = pushop.repo
355 # very naive computation, that can be quite expensive on big repo.
355 # very naive computation, that can be quite expensive on big repo.
356 # However: evolution is currently slow on them anyway.
356 # However: evolution is currently slow on them anyway.
357 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
357 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
358 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
358 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
359
359
360 @pushdiscovery('bookmarks')
360 @pushdiscovery('bookmarks')
361 def _pushdiscoverybookmarks(pushop):
361 def _pushdiscoverybookmarks(pushop):
362 ui = pushop.ui
362 ui = pushop.ui
363 repo = pushop.repo.unfiltered()
363 repo = pushop.repo.unfiltered()
364 remote = pushop.remote
364 remote = pushop.remote
365 ui.debug("checking for updated bookmarks\n")
365 ui.debug("checking for updated bookmarks\n")
366 ancestors = ()
366 ancestors = ()
367 if pushop.revs:
367 if pushop.revs:
368 revnums = map(repo.changelog.rev, pushop.revs)
368 revnums = map(repo.changelog.rev, pushop.revs)
369 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
369 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
370 remotebookmark = remote.listkeys('bookmarks')
370 remotebookmark = remote.listkeys('bookmarks')
371
371
372 explicit = set(pushop.bookmarks)
372 explicit = set(pushop.bookmarks)
373
373
374 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
374 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
375 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
375 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
376 for b, scid, dcid in advsrc:
376 for b, scid, dcid in advsrc:
377 if b in explicit:
377 if b in explicit:
378 explicit.remove(b)
378 explicit.remove(b)
379 if not ancestors or repo[scid].rev() in ancestors:
379 if not ancestors or repo[scid].rev() in ancestors:
380 pushop.outbookmarks.append((b, dcid, scid))
380 pushop.outbookmarks.append((b, dcid, scid))
381 # search added bookmark
381 # search added bookmark
382 for b, scid, dcid in addsrc:
382 for b, scid, dcid in addsrc:
383 if b in explicit:
383 if b in explicit:
384 explicit.remove(b)
384 explicit.remove(b)
385 pushop.outbookmarks.append((b, '', scid))
385 pushop.outbookmarks.append((b, '', scid))
386 # search for overwritten bookmark
386 # search for overwritten bookmark
387 for b, scid, dcid in advdst + diverge + differ:
387 for b, scid, dcid in advdst + diverge + differ:
388 if b in explicit:
388 if b in explicit:
389 explicit.remove(b)
389 explicit.remove(b)
390 pushop.outbookmarks.append((b, dcid, scid))
390 pushop.outbookmarks.append((b, dcid, scid))
391 # search for bookmark to delete
391 # search for bookmark to delete
392 for b, scid, dcid in adddst:
392 for b, scid, dcid in adddst:
393 if b in explicit:
393 if b in explicit:
394 explicit.remove(b)
394 explicit.remove(b)
395 # treat as "deleted locally"
395 # treat as "deleted locally"
396 pushop.outbookmarks.append((b, dcid, ''))
396 pushop.outbookmarks.append((b, dcid, ''))
397 # identical bookmarks shouldn't get reported
397 # identical bookmarks shouldn't get reported
398 for b, scid, dcid in same:
398 for b, scid, dcid in same:
399 if b in explicit:
399 if b in explicit:
400 explicit.remove(b)
400 explicit.remove(b)
401
401
402 if explicit:
402 if explicit:
403 explicit = sorted(explicit)
403 explicit = sorted(explicit)
404 # we should probably list all of them
404 # we should probably list all of them
405 ui.warn(_('bookmark %s does not exist on the local '
405 ui.warn(_('bookmark %s does not exist on the local '
406 'or remote repository!\n') % explicit[0])
406 'or remote repository!\n') % explicit[0])
407 pushop.bkresult = 2
407 pushop.bkresult = 2
408
408
409 pushop.outbookmarks.sort()
409 pushop.outbookmarks.sort()
410
410
411 def _pushcheckoutgoing(pushop):
411 def _pushcheckoutgoing(pushop):
412 outgoing = pushop.outgoing
412 outgoing = pushop.outgoing
413 unfi = pushop.repo.unfiltered()
413 unfi = pushop.repo.unfiltered()
414 if not outgoing.missing:
414 if not outgoing.missing:
415 # nothing to push
415 # nothing to push
416 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
416 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
417 return False
417 return False
418 # something to push
418 # something to push
419 if not pushop.force:
419 if not pushop.force:
420 # if repo.obsstore == False --> no obsolete
420 # if repo.obsstore == False --> no obsolete
421 # then, save the iteration
421 # then, save the iteration
422 if unfi.obsstore:
422 if unfi.obsstore:
423 # this message are here for 80 char limit reason
423 # this message are here for 80 char limit reason
424 mso = _("push includes obsolete changeset: %s!")
424 mso = _("push includes obsolete changeset: %s!")
425 mst = {"unstable": _("push includes unstable changeset: %s!"),
425 mst = {"unstable": _("push includes unstable changeset: %s!"),
426 "bumped": _("push includes bumped changeset: %s!"),
426 "bumped": _("push includes bumped changeset: %s!"),
427 "divergent": _("push includes divergent changeset: %s!")}
427 "divergent": _("push includes divergent changeset: %s!")}
428 # If we are to push if there is at least one
428 # If we are to push if there is at least one
429 # obsolete or unstable changeset in missing, at
429 # obsolete or unstable changeset in missing, at
430 # least one of the missinghead will be obsolete or
430 # least one of the missinghead will be obsolete or
431 # unstable. So checking heads only is ok
431 # unstable. So checking heads only is ok
432 for node in outgoing.missingheads:
432 for node in outgoing.missingheads:
433 ctx = unfi[node]
433 ctx = unfi[node]
434 if ctx.obsolete():
434 if ctx.obsolete():
435 raise util.Abort(mso % ctx)
435 raise util.Abort(mso % ctx)
436 elif ctx.troubled():
436 elif ctx.troubled():
437 raise util.Abort(mst[ctx.troubles()[0]] % ctx)
437 raise util.Abort(mst[ctx.troubles()[0]] % ctx)
438 newbm = pushop.ui.configlist('bookmarks', 'pushing')
438 newbm = pushop.ui.configlist('bookmarks', 'pushing')
439 discovery.checkheads(unfi, pushop.remote, outgoing,
439 discovery.checkheads(unfi, pushop.remote, outgoing,
440 pushop.remoteheads,
440 pushop.remoteheads,
441 pushop.newbranch,
441 pushop.newbranch,
442 bool(pushop.incoming),
442 bool(pushop.incoming),
443 newbm)
443 newbm)
444 return True
444 return True
445
445
446 # List of names of steps to perform for an outgoing bundle2, order matters.
446 # List of names of steps to perform for an outgoing bundle2, order matters.
447 b2partsgenorder = []
447 b2partsgenorder = []
448
448
449 # Mapping between step name and function
449 # Mapping between step name and function
450 #
450 #
451 # This exists to help extensions wrap steps if necessary
451 # This exists to help extensions wrap steps if necessary
452 b2partsgenmapping = {}
452 b2partsgenmapping = {}
453
453
def b2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part

    The decorated function is registered in the step -> function mapping
    and inserted into the ordered list of steps: appended when ``idx`` is
    None, otherwise placed at position ``idx``. Beware that decorated
    functions are therefore registered in definition order (this may
    matter).

    Only use this decorator for new steps; to wrap an existing step from
    an extension, modify the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        # inserting at len() is equivalent to appending
        pos = len(b2partsgenorder) if idx is None else idx
        b2partsgenorder.insert(pos, stepname)
        return func
    return dec
472
472
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    Returns a reply handler that extracts that result from the server
    reply, or None when there is nothing to push.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # ask the server to abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version both sides understand
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
513
513
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One 'pushkey' part is added per remote head that must be turned
    public. Returns a reply handler reporting per-head success or
    failure, or None when the remote lacks pushkey support.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # same membership idiom as _pushb2bookmarks (was: "not 'pushkey' in")
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """warn about any head the server did not move to public"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
545
545
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add a part carrying outgoing obsolescence markers to the bundle

    Skipped when markers were already handled or when the remote does not
    advertise an obsolescence-marker version we share.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no common format: leave the step undone so the pushkey-based
        # fallback (_pushobsolete) can still run
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)
557
557
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    (docstring fixed: it previously said "phase push", a copy-paste from
    _pushb2phases.) One 'pushkey' part is added per outgoing bookmark
    update; the returned reply handler reports per-bookmark results.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify the update so the reply handler picks the right message
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        """report per-bookmark success/failure from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
600
600
601
601
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    Builds one bundle2 by running every registered part generator (see
    b2partsgenorder), sends it in a single unbundle round-trip, then
    processes the server reply and dispatches it to the handlers the
    generators returned.
    """
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            # a generator may return a handler for the server reply
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            # allow the reply to carry data back into a local transaction
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
638
638
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Bundle1 (non-bundle2) code path. Stores the remote's return value in
    ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
687
687
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote's phase data locally (moving common heads to
    public or draft as appropriate), then pushes outdated phase updates to
    the remote over pushkey unless bundle2 already handled them.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
743
743
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # we hold a transaction: actually advance the phase boundary
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Only inform the user that phases should have been moved, and only
    # when at least one node would actually move.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
760
760
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    # encode markers into pushkey-sized chunks; iterate keys in reverse
    # order so the transfer ends with dump0
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
779
779
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # classify the update so the right status/warning message is used
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery may already have set bkresult for an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
801
801
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new object should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager (set by pull() before discovery runs)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data (None until fetched)
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # set of steps already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
858
858
class transactionmanager(object):
    """Manage the life cycle of a repository transaction

    The transaction is created lazily on first use; the appropriate hooks
    run when it is closed."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
888
888
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and related data) from ``remote`` into ``repo``

    Runs discovery, then either a single bundle2 exchange or the
    individual bundle1 steps (changesets, phases, bookmarks, obsmarkers),
    all inside one lock/transaction. Returns the pulloperation object.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # local-to-local pull: fail early if the source needs features we
        # do not support
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # the individual steps check pullop.stepsdone, so anything bundle2
        # already covered is skipped here
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
915
915
# list of steps to perform discovery before pull, order matters.
# Populated by the @pulldiscovery decorator below.
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
923
923
def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The decorated function is registered in the step -> function mapping
    and appended to the ordered list of steps, so registration order may
    matter.

    Only use this decorator for new steps; to wrap an existing step from
    an extension, modify the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
939
939
940 def _pulldiscovery(pullop):
940 def _pulldiscovery(pullop):
941 """Run all discovery steps"""
941 """Run all discovery steps"""
942 for stepname in pulldiscoveryorder:
942 for stepname in pulldiscoveryorder:
943 step = pulldiscoverymapping[stepname]
943 step = pulldiscoverymapping[stepname]
944 step(pullop)
944 step(pullop)
945
945
946 @pulldiscovery('b1:bookmarks')
946 @pulldiscovery('b1:bookmarks')
947 def _pullbookmarkbundle1(pullop):
947 def _pullbookmarkbundle1(pullop):
948 """fetch bookmark data in bundle1 case
948 """fetch bookmark data in bundle1 case
949
949
950 If not using bundle2, we have to fetch bookmarks before changeset
950 If not using bundle2, we have to fetch bookmarks before changeset
951 discovery to reduce the chance and impact of race conditions."""
951 discovery to reduce the chance and impact of race conditions."""
952 if pullop.remotebookmarks is not None:
952 if pullop.remotebookmarks is not None:
953 return
953 return
954 if not _canusebundle2(pullop): # all bundle2 server now support listkeys
954 if not _canusebundle2(pullop): # all bundle2 server now support listkeys
955 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
955 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
956
956
957
957
958 @pulldiscovery('changegroup')
958 @pulldiscovery('changegroup')
959 def _pulldiscoverychangegroup(pullop):
959 def _pulldiscoverychangegroup(pullop):
960 """discovery phase for the pull
960 """discovery phase for the pull
961
961
962 Current handle changeset discovery only, will change handle all discovery
962 Current handle changeset discovery only, will change handle all discovery
963 at some point."""
963 at some point."""
964 tmp = discovery.findcommonincoming(pullop.repo,
964 tmp = discovery.findcommonincoming(pullop.repo,
965 pullop.remote,
965 pullop.remote,
966 heads=pullop.heads,
966 heads=pullop.heads,
967 force=pullop.force)
967 force=pullop.force)
968 common, fetch, rheads = tmp
968 common, fetch, rheads = tmp
969 nm = pullop.repo.unfiltered().changelog.nodemap
969 nm = pullop.repo.unfiltered().changelog.nodemap
970 if fetch and rheads:
970 if fetch and rheads:
971 # If a remote heads in filtered locally, lets drop it from the unknown
971 # If a remote heads in filtered locally, lets drop it from the unknown
972 # remote heads and put in back in common.
972 # remote heads and put in back in common.
973 #
973 #
974 # This is a hackish solution to catch most of "common but locally
974 # This is a hackish solution to catch most of "common but locally
975 # hidden situation". We do not performs discovery on unfiltered
975 # hidden situation". We do not performs discovery on unfiltered
976 # repository because it end up doing a pathological amount of round
976 # repository because it end up doing a pathological amount of round
977 # trip for w huge amount of changeset we do not care about.
977 # trip for w huge amount of changeset we do not care about.
978 #
978 #
979 # If a set of such "common but filtered" changeset exist on the server
979 # If a set of such "common but filtered" changeset exist on the server
980 # but are not including a remote heads, we'll not be able to detect it,
980 # but are not including a remote heads, we'll not be able to detect it,
981 scommon = set(common)
981 scommon = set(common)
982 filteredrheads = []
982 filteredrheads = []
983 for n in rheads:
983 for n in rheads:
984 if n in nm:
984 if n in nm:
985 if n not in scommon:
985 if n not in scommon:
986 common.append(n)
986 common.append(n)
987 else:
987 else:
988 filteredrheads.append(n)
988 filteredrheads.append(n)
989 if not filteredrheads:
989 if not filteredrheads:
990 fetch = []
990 fetch = []
991 rheads = filteredrheads
991 rheads = filteredrheads
992 pullop.common = common
992 pullop.common = common
993 pullop.fetch = fetch
993 pullop.fetch = fetch
994 pullop.rheads = rheads
994 pullop.rheads = rheads
995
995
996 def _pullbundle2(pullop):
996 def _pullbundle2(pullop):
997 """pull data using bundle2
997 """pull data using bundle2
998
998
999 For now, the only supported data are changegroup."""
999 For now, the only supported data are changegroup."""
1000 remotecaps = bundle2.bundle2caps(pullop.remote)
1000 remotecaps = bundle2.bundle2caps(pullop.remote)
1001 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1001 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1002 # pulling changegroup
1002 # pulling changegroup
1003 pullop.stepsdone.add('changegroup')
1003 pullop.stepsdone.add('changegroup')
1004
1004
1005 kwargs['common'] = pullop.common
1005 kwargs['common'] = pullop.common
1006 kwargs['heads'] = pullop.heads or pullop.rheads
1006 kwargs['heads'] = pullop.heads or pullop.rheads
1007 kwargs['cg'] = pullop.fetch
1007 kwargs['cg'] = pullop.fetch
1008 if 'listkeys' in remotecaps:
1008 if 'listkeys' in remotecaps:
1009 kwargs['listkeys'] = ['phase', 'bookmarks']
1009 kwargs['listkeys'] = ['phase']
1010 if pullop.remotebookmarks is None:
1011 # make sure to always includes bookmark data when migrating
1012 # `hg incoming --bundle` to using this function.
1013 kwargs['listkeys'].append('bookmarks')
1010 if not pullop.fetch:
1014 if not pullop.fetch:
1011 pullop.repo.ui.status(_("no changes found\n"))
1015 pullop.repo.ui.status(_("no changes found\n"))
1012 pullop.cgresult = 0
1016 pullop.cgresult = 0
1013 else:
1017 else:
1014 if pullop.heads is None and list(pullop.common) == [nullid]:
1018 if pullop.heads is None and list(pullop.common) == [nullid]:
1015 pullop.repo.ui.status(_("requesting all changes\n"))
1019 pullop.repo.ui.status(_("requesting all changes\n"))
1016 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1020 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1017 remoteversions = bundle2.obsmarkersversion(remotecaps)
1021 remoteversions = bundle2.obsmarkersversion(remotecaps)
1018 if obsolete.commonversion(remoteversions) is not None:
1022 if obsolete.commonversion(remoteversions) is not None:
1019 kwargs['obsmarkers'] = True
1023 kwargs['obsmarkers'] = True
1020 pullop.stepsdone.add('obsmarkers')
1024 pullop.stepsdone.add('obsmarkers')
1021 _pullbundle2extraprepare(pullop, kwargs)
1025 _pullbundle2extraprepare(pullop, kwargs)
1022 bundle = pullop.remote.getbundle('pull', **kwargs)
1026 bundle = pullop.remote.getbundle('pull', **kwargs)
1023 try:
1027 try:
1024 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1028 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1025 except error.BundleValueError, exc:
1029 except error.BundleValueError, exc:
1026 raise util.Abort('missing support for %s' % exc)
1030 raise util.Abort('missing support for %s' % exc)
1027
1031
1028 if pullop.fetch:
1032 if pullop.fetch:
1029 results = [cg['return'] for cg in op.records['changegroup']]
1033 results = [cg['return'] for cg in op.records['changegroup']]
1030 pullop.cgresult = changegroup.combineresults(results)
1034 pullop.cgresult = changegroup.combineresults(results)
1031
1035
1032 # processing phases change
1036 # processing phases change
1033 for namespace, value in op.records['listkeys']:
1037 for namespace, value in op.records['listkeys']:
1034 if namespace == 'phases':
1038 if namespace == 'phases':
1035 _pullapplyphases(pullop, value)
1039 _pullapplyphases(pullop, value)
1036
1040
1037 # processing bookmark update
1041 # processing bookmark update
1038 for namespace, value in op.records['listkeys']:
1042 for namespace, value in op.records['listkeys']:
1039 if namespace == 'bookmarks':
1043 if namespace == 'bookmarks':
1040 pullop.remotebookmarks = value
1044 pullop.remotebookmarks = value
1045
1046 # bookmark data were either already there or pulled in the bundle
1047 if pullop.remotebookmarks is not None:
1041 _pullbookmarks(pullop)
1048 _pullbookmarks(pullop)
1042
1049
1043 def _pullbundle2extraprepare(pullop, kwargs):
1050 def _pullbundle2extraprepare(pullop, kwargs):
1044 """hook function so that extensions can extend the getbundle call"""
1051 """hook function so that extensions can extend the getbundle call"""
1045 pass
1052 pass
1046
1053
1047 def _pullchangeset(pullop):
1054 def _pullchangeset(pullop):
1048 """pull changeset from unbundle into the local repo"""
1055 """pull changeset from unbundle into the local repo"""
1049 # We delay the open of the transaction as late as possible so we
1056 # We delay the open of the transaction as late as possible so we
1050 # don't open transaction for nothing or you break future useful
1057 # don't open transaction for nothing or you break future useful
1051 # rollback call
1058 # rollback call
1052 if 'changegroup' in pullop.stepsdone:
1059 if 'changegroup' in pullop.stepsdone:
1053 return
1060 return
1054 pullop.stepsdone.add('changegroup')
1061 pullop.stepsdone.add('changegroup')
1055 if not pullop.fetch:
1062 if not pullop.fetch:
1056 pullop.repo.ui.status(_("no changes found\n"))
1063 pullop.repo.ui.status(_("no changes found\n"))
1057 pullop.cgresult = 0
1064 pullop.cgresult = 0
1058 return
1065 return
1059 pullop.gettransaction()
1066 pullop.gettransaction()
1060 if pullop.heads is None and list(pullop.common) == [nullid]:
1067 if pullop.heads is None and list(pullop.common) == [nullid]:
1061 pullop.repo.ui.status(_("requesting all changes\n"))
1068 pullop.repo.ui.status(_("requesting all changes\n"))
1062 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1069 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1063 # issue1320, avoid a race if remote changed after discovery
1070 # issue1320, avoid a race if remote changed after discovery
1064 pullop.heads = pullop.rheads
1071 pullop.heads = pullop.rheads
1065
1072
1066 if pullop.remote.capable('getbundle'):
1073 if pullop.remote.capable('getbundle'):
1067 # TODO: get bundlecaps from remote
1074 # TODO: get bundlecaps from remote
1068 cg = pullop.remote.getbundle('pull', common=pullop.common,
1075 cg = pullop.remote.getbundle('pull', common=pullop.common,
1069 heads=pullop.heads or pullop.rheads)
1076 heads=pullop.heads or pullop.rheads)
1070 elif pullop.heads is None:
1077 elif pullop.heads is None:
1071 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1078 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1072 elif not pullop.remote.capable('changegroupsubset'):
1079 elif not pullop.remote.capable('changegroupsubset'):
1073 raise util.Abort(_("partial pull cannot be done because "
1080 raise util.Abort(_("partial pull cannot be done because "
1074 "other repository doesn't support "
1081 "other repository doesn't support "
1075 "changegroupsubset."))
1082 "changegroupsubset."))
1076 else:
1083 else:
1077 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1084 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1078 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1085 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1079 pullop.remote.url())
1086 pullop.remote.url())
1080
1087
1081 def _pullphase(pullop):
1088 def _pullphase(pullop):
1082 # Get remote phases data from remote
1089 # Get remote phases data from remote
1083 if 'phases' in pullop.stepsdone:
1090 if 'phases' in pullop.stepsdone:
1084 return
1091 return
1085 remotephases = pullop.remote.listkeys('phases')
1092 remotephases = pullop.remote.listkeys('phases')
1086 _pullapplyphases(pullop, remotephases)
1093 _pullapplyphases(pullop, remotephases)
1087
1094
1088 def _pullapplyphases(pullop, remotephases):
1095 def _pullapplyphases(pullop, remotephases):
1089 """apply phase movement from observed remote state"""
1096 """apply phase movement from observed remote state"""
1090 if 'phases' in pullop.stepsdone:
1097 if 'phases' in pullop.stepsdone:
1091 return
1098 return
1092 pullop.stepsdone.add('phases')
1099 pullop.stepsdone.add('phases')
1093 publishing = bool(remotephases.get('publishing', False))
1100 publishing = bool(remotephases.get('publishing', False))
1094 if remotephases and not publishing:
1101 if remotephases and not publishing:
1095 # remote is new and unpublishing
1102 # remote is new and unpublishing
1096 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1103 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1097 pullop.pulledsubset,
1104 pullop.pulledsubset,
1098 remotephases)
1105 remotephases)
1099 dheads = pullop.pulledsubset
1106 dheads = pullop.pulledsubset
1100 else:
1107 else:
1101 # Remote is old or publishing all common changesets
1108 # Remote is old or publishing all common changesets
1102 # should be seen as public
1109 # should be seen as public
1103 pheads = pullop.pulledsubset
1110 pheads = pullop.pulledsubset
1104 dheads = []
1111 dheads = []
1105 unfi = pullop.repo.unfiltered()
1112 unfi = pullop.repo.unfiltered()
1106 phase = unfi._phasecache.phase
1113 phase = unfi._phasecache.phase
1107 rev = unfi.changelog.nodemap.get
1114 rev = unfi.changelog.nodemap.get
1108 public = phases.public
1115 public = phases.public
1109 draft = phases.draft
1116 draft = phases.draft
1110
1117
1111 # exclude changesets already public locally and update the others
1118 # exclude changesets already public locally and update the others
1112 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1119 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1113 if pheads:
1120 if pheads:
1114 tr = pullop.gettransaction()
1121 tr = pullop.gettransaction()
1115 phases.advanceboundary(pullop.repo, tr, public, pheads)
1122 phases.advanceboundary(pullop.repo, tr, public, pheads)
1116
1123
1117 # exclude changesets already draft locally and update the others
1124 # exclude changesets already draft locally and update the others
1118 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1125 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1119 if dheads:
1126 if dheads:
1120 tr = pullop.gettransaction()
1127 tr = pullop.gettransaction()
1121 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1128 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1122
1129
1123 def _pullbookmarks(pullop):
1130 def _pullbookmarks(pullop):
1124 """process the remote bookmark information to update the local one"""
1131 """process the remote bookmark information to update the local one"""
1125 if 'bookmarks' in pullop.stepsdone:
1132 if 'bookmarks' in pullop.stepsdone:
1126 return
1133 return
1127 pullop.stepsdone.add('bookmarks')
1134 pullop.stepsdone.add('bookmarks')
1128 repo = pullop.repo
1135 repo = pullop.repo
1129 remotebookmarks = pullop.remotebookmarks
1136 remotebookmarks = pullop.remotebookmarks
1130 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1137 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1131 pullop.remote.url(),
1138 pullop.remote.url(),
1132 pullop.gettransaction,
1139 pullop.gettransaction,
1133 explicit=pullop.explicitbookmarks)
1140 explicit=pullop.explicitbookmarks)
1134
1141
1135 def _pullobsolete(pullop):
1142 def _pullobsolete(pullop):
1136 """utility function to pull obsolete markers from a remote
1143 """utility function to pull obsolete markers from a remote
1137
1144
1138 The `gettransaction` is function that return the pull transaction, creating
1145 The `gettransaction` is function that return the pull transaction, creating
1139 one if necessary. We return the transaction to inform the calling code that
1146 one if necessary. We return the transaction to inform the calling code that
1140 a new transaction have been created (when applicable).
1147 a new transaction have been created (when applicable).
1141
1148
1142 Exists mostly to allow overriding for experimentation purpose"""
1149 Exists mostly to allow overriding for experimentation purpose"""
1143 if 'obsmarkers' in pullop.stepsdone:
1150 if 'obsmarkers' in pullop.stepsdone:
1144 return
1151 return
1145 pullop.stepsdone.add('obsmarkers')
1152 pullop.stepsdone.add('obsmarkers')
1146 tr = None
1153 tr = None
1147 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1154 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1148 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1155 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1149 remoteobs = pullop.remote.listkeys('obsolete')
1156 remoteobs = pullop.remote.listkeys('obsolete')
1150 if 'dump0' in remoteobs:
1157 if 'dump0' in remoteobs:
1151 tr = pullop.gettransaction()
1158 tr = pullop.gettransaction()
1152 for key in sorted(remoteobs, reverse=True):
1159 for key in sorted(remoteobs, reverse=True):
1153 if key.startswith('dump'):
1160 if key.startswith('dump'):
1154 data = base85.b85decode(remoteobs[key])
1161 data = base85.b85decode(remoteobs[key])
1155 pullop.repo.obsstore.mergemarkers(tr, data)
1162 pullop.repo.obsstore.mergemarkers(tr, data)
1156 pullop.repo.invalidatevolatilesets()
1163 pullop.repo.invalidatevolatilesets()
1157 return tr
1164 return tr
1158
1165
1159 def caps20to10(repo):
1166 def caps20to10(repo):
1160 """return a set with appropriate options to use bundle20 during getbundle"""
1167 """return a set with appropriate options to use bundle20 during getbundle"""
1161 caps = set(['HG20'])
1168 caps = set(['HG20'])
1162 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1169 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1163 caps.add('bundle2=' + urllib.quote(capsblob))
1170 caps.add('bundle2=' + urllib.quote(capsblob))
1164 return caps
1171 return caps
1165
1172
1166 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1173 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1167 getbundle2partsorder = []
1174 getbundle2partsorder = []
1168
1175
1169 # Mapping between step name and function
1176 # Mapping between step name and function
1170 #
1177 #
1171 # This exists to help extensions wrap steps if necessary
1178 # This exists to help extensions wrap steps if necessary
1172 getbundle2partsmapping = {}
1179 getbundle2partsmapping = {}
1173
1180
1174 def getbundle2partsgenerator(stepname, idx=None):
1181 def getbundle2partsgenerator(stepname, idx=None):
1175 """decorator for function generating bundle2 part for getbundle
1182 """decorator for function generating bundle2 part for getbundle
1176
1183
1177 The function is added to the step -> function mapping and appended to the
1184 The function is added to the step -> function mapping and appended to the
1178 list of steps. Beware that decorated functions will be added in order
1185 list of steps. Beware that decorated functions will be added in order
1179 (this may matter).
1186 (this may matter).
1180
1187
1181 You can only use this decorator for new steps, if you want to wrap a step
1188 You can only use this decorator for new steps, if you want to wrap a step
1182 from an extension, attack the getbundle2partsmapping dictionary directly."""
1189 from an extension, attack the getbundle2partsmapping dictionary directly."""
1183 def dec(func):
1190 def dec(func):
1184 assert stepname not in getbundle2partsmapping
1191 assert stepname not in getbundle2partsmapping
1185 getbundle2partsmapping[stepname] = func
1192 getbundle2partsmapping[stepname] = func
1186 if idx is None:
1193 if idx is None:
1187 getbundle2partsorder.append(stepname)
1194 getbundle2partsorder.append(stepname)
1188 else:
1195 else:
1189 getbundle2partsorder.insert(idx, stepname)
1196 getbundle2partsorder.insert(idx, stepname)
1190 return func
1197 return func
1191 return dec
1198 return dec
1192
1199
1193 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1200 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1194 **kwargs):
1201 **kwargs):
1195 """return a full bundle (with potentially multiple kind of parts)
1202 """return a full bundle (with potentially multiple kind of parts)
1196
1203
1197 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1204 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1198 passed. For now, the bundle can contain only changegroup, but this will
1205 passed. For now, the bundle can contain only changegroup, but this will
1199 changes when more part type will be available for bundle2.
1206 changes when more part type will be available for bundle2.
1200
1207
1201 This is different from changegroup.getchangegroup that only returns an HG10
1208 This is different from changegroup.getchangegroup that only returns an HG10
1202 changegroup bundle. They may eventually get reunited in the future when we
1209 changegroup bundle. They may eventually get reunited in the future when we
1203 have a clearer idea of the API we what to query different data.
1210 have a clearer idea of the API we what to query different data.
1204
1211
1205 The implementation is at a very early stage and will get massive rework
1212 The implementation is at a very early stage and will get massive rework
1206 when the API of bundle is refined.
1213 when the API of bundle is refined.
1207 """
1214 """
1208 # bundle10 case
1215 # bundle10 case
1209 usebundle2 = False
1216 usebundle2 = False
1210 if bundlecaps is not None:
1217 if bundlecaps is not None:
1211 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1218 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1212 if not usebundle2:
1219 if not usebundle2:
1213 if bundlecaps and not kwargs.get('cg', True):
1220 if bundlecaps and not kwargs.get('cg', True):
1214 raise ValueError(_('request for bundle10 must include changegroup'))
1221 raise ValueError(_('request for bundle10 must include changegroup'))
1215
1222
1216 if kwargs:
1223 if kwargs:
1217 raise ValueError(_('unsupported getbundle arguments: %s')
1224 raise ValueError(_('unsupported getbundle arguments: %s')
1218 % ', '.join(sorted(kwargs.keys())))
1225 % ', '.join(sorted(kwargs.keys())))
1219 return changegroup.getchangegroup(repo, source, heads=heads,
1226 return changegroup.getchangegroup(repo, source, heads=heads,
1220 common=common, bundlecaps=bundlecaps)
1227 common=common, bundlecaps=bundlecaps)
1221
1228
1222 # bundle20 case
1229 # bundle20 case
1223 b2caps = {}
1230 b2caps = {}
1224 for bcaps in bundlecaps:
1231 for bcaps in bundlecaps:
1225 if bcaps.startswith('bundle2='):
1232 if bcaps.startswith('bundle2='):
1226 blob = urllib.unquote(bcaps[len('bundle2='):])
1233 blob = urllib.unquote(bcaps[len('bundle2='):])
1227 b2caps.update(bundle2.decodecaps(blob))
1234 b2caps.update(bundle2.decodecaps(blob))
1228 bundler = bundle2.bundle20(repo.ui, b2caps)
1235 bundler = bundle2.bundle20(repo.ui, b2caps)
1229
1236
1230 kwargs['heads'] = heads
1237 kwargs['heads'] = heads
1231 kwargs['common'] = common
1238 kwargs['common'] = common
1232
1239
1233 for name in getbundle2partsorder:
1240 for name in getbundle2partsorder:
1234 func = getbundle2partsmapping[name]
1241 func = getbundle2partsmapping[name]
1235 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1242 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1236 **kwargs)
1243 **kwargs)
1237
1244
1238 return util.chunkbuffer(bundler.getchunks())
1245 return util.chunkbuffer(bundler.getchunks())
1239
1246
1240 @getbundle2partsgenerator('changegroup')
1247 @getbundle2partsgenerator('changegroup')
1241 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1248 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1242 b2caps=None, heads=None, common=None, **kwargs):
1249 b2caps=None, heads=None, common=None, **kwargs):
1243 """add a changegroup part to the requested bundle"""
1250 """add a changegroup part to the requested bundle"""
1244 cg = None
1251 cg = None
1245 if kwargs.get('cg', True):
1252 if kwargs.get('cg', True):
1246 # build changegroup bundle here.
1253 # build changegroup bundle here.
1247 version = None
1254 version = None
1248 cgversions = b2caps.get('changegroup')
1255 cgversions = b2caps.get('changegroup')
1249 if not cgversions: # 3.1 and 3.2 ship with an empty value
1256 if not cgversions: # 3.1 and 3.2 ship with an empty value
1250 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1257 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1251 common=common,
1258 common=common,
1252 bundlecaps=bundlecaps)
1259 bundlecaps=bundlecaps)
1253 else:
1260 else:
1254 cgversions = [v for v in cgversions if v in changegroup.packermap]
1261 cgversions = [v for v in cgversions if v in changegroup.packermap]
1255 if not cgversions:
1262 if not cgversions:
1256 raise ValueError(_('no common changegroup version'))
1263 raise ValueError(_('no common changegroup version'))
1257 version = max(cgversions)
1264 version = max(cgversions)
1258 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1265 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1259 common=common,
1266 common=common,
1260 bundlecaps=bundlecaps,
1267 bundlecaps=bundlecaps,
1261 version=version)
1268 version=version)
1262
1269
1263 if cg:
1270 if cg:
1264 part = bundler.newpart('changegroup', data=cg)
1271 part = bundler.newpart('changegroup', data=cg)
1265 if version is not None:
1272 if version is not None:
1266 part.addparam('version', version)
1273 part.addparam('version', version)
1267
1274
1268 @getbundle2partsgenerator('listkeys')
1275 @getbundle2partsgenerator('listkeys')
1269 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1276 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1270 b2caps=None, **kwargs):
1277 b2caps=None, **kwargs):
1271 """add parts containing listkeys namespaces to the requested bundle"""
1278 """add parts containing listkeys namespaces to the requested bundle"""
1272 listkeys = kwargs.get('listkeys', ())
1279 listkeys = kwargs.get('listkeys', ())
1273 for namespace in listkeys:
1280 for namespace in listkeys:
1274 part = bundler.newpart('listkeys')
1281 part = bundler.newpart('listkeys')
1275 part.addparam('namespace', namespace)
1282 part.addparam('namespace', namespace)
1276 keys = repo.listkeys(namespace).items()
1283 keys = repo.listkeys(namespace).items()
1277 part.data = pushkey.encodekeys(keys)
1284 part.data = pushkey.encodekeys(keys)
1278
1285
1279 @getbundle2partsgenerator('obsmarkers')
1286 @getbundle2partsgenerator('obsmarkers')
1280 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1287 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1281 b2caps=None, heads=None, **kwargs):
1288 b2caps=None, heads=None, **kwargs):
1282 """add an obsolescence markers part to the requested bundle"""
1289 """add an obsolescence markers part to the requested bundle"""
1283 if kwargs.get('obsmarkers', False):
1290 if kwargs.get('obsmarkers', False):
1284 if heads is None:
1291 if heads is None:
1285 heads = repo.heads()
1292 heads = repo.heads()
1286 subset = [c.node() for c in repo.set('::%ln', heads)]
1293 subset = [c.node() for c in repo.set('::%ln', heads)]
1287 markers = repo.obsstore.relevantmarkers(subset)
1294 markers = repo.obsstore.relevantmarkers(subset)
1288 markers = sorted(markers)
1295 markers = sorted(markers)
1289 buildobsmarkerspart(bundler, markers)
1296 buildobsmarkerspart(bundler, markers)
1290
1297
1291 @getbundle2partsgenerator('hgtagsfnodes')
1298 @getbundle2partsgenerator('hgtagsfnodes')
1292 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1299 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1293 b2caps=None, heads=None, common=None,
1300 b2caps=None, heads=None, common=None,
1294 **kwargs):
1301 **kwargs):
1295 """Transfer the .hgtags filenodes mapping.
1302 """Transfer the .hgtags filenodes mapping.
1296
1303
1297 Only values for heads in this bundle will be transferred.
1304 Only values for heads in this bundle will be transferred.
1298
1305
1299 The part data consists of pairs of 20 byte changeset node and .hgtags
1306 The part data consists of pairs of 20 byte changeset node and .hgtags
1300 filenodes raw values.
1307 filenodes raw values.
1301 """
1308 """
1302 # Don't send unless:
1309 # Don't send unless:
1303 # - changeset are being exchanged,
1310 # - changeset are being exchanged,
1304 # - the client supports it.
1311 # - the client supports it.
1305 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1312 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1306 return
1313 return
1307
1314
1308 outgoing = changegroup.computeoutgoing(repo, heads, common)
1315 outgoing = changegroup.computeoutgoing(repo, heads, common)
1309
1316
1310 if not outgoing.missingheads:
1317 if not outgoing.missingheads:
1311 return
1318 return
1312
1319
1313 cache = tags.hgtagsfnodescache(repo.unfiltered())
1320 cache = tags.hgtagsfnodescache(repo.unfiltered())
1314 chunks = []
1321 chunks = []
1315
1322
1316 # .hgtags fnodes are only relevant for head changesets. While we could
1323 # .hgtags fnodes are only relevant for head changesets. While we could
1317 # transfer values for all known nodes, there will likely be little to
1324 # transfer values for all known nodes, there will likely be little to
1318 # no benefit.
1325 # no benefit.
1319 #
1326 #
1320 # We don't bother using a generator to produce output data because
1327 # We don't bother using a generator to produce output data because
1321 # a) we only have 40 bytes per head and even esoteric numbers of heads
1328 # a) we only have 40 bytes per head and even esoteric numbers of heads
1322 # consume little memory (1M heads is 40MB) b) we don't want to send the
1329 # consume little memory (1M heads is 40MB) b) we don't want to send the
1323 # part if we don't have entries and knowing if we have entries requires
1330 # part if we don't have entries and knowing if we have entries requires
1324 # cache lookups.
1331 # cache lookups.
1325 for node in outgoing.missingheads:
1332 for node in outgoing.missingheads:
1326 # Don't compute missing, as this may slow down serving.
1333 # Don't compute missing, as this may slow down serving.
1327 fnode = cache.getfnode(node, computemissing=False)
1334 fnode = cache.getfnode(node, computemissing=False)
1328 if fnode is not None:
1335 if fnode is not None:
1329 chunks.extend([node, fnode])
1336 chunks.extend([node, fnode])
1330
1337
1331 if chunks:
1338 if chunks:
1332 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1339 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1333
1340
1334 def check_heads(repo, their_heads, context):
1341 def check_heads(repo, their_heads, context):
1335 """check if the heads of a repo have been modified
1342 """check if the heads of a repo have been modified
1336
1343
1337 Used by peer for unbundling.
1344 Used by peer for unbundling.
1338 """
1345 """
1339 heads = repo.heads()
1346 heads = repo.heads()
1340 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1347 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1341 if not (their_heads == ['force'] or their_heads == heads or
1348 if not (their_heads == ['force'] or their_heads == heads or
1342 their_heads == ['hashed', heads_hash]):
1349 their_heads == ['hashed', heads_hash]):
1343 # someone else committed/pushed/unbundled while we
1350 # someone else committed/pushed/unbundled while we
1344 # were transferring data
1351 # were transferring data
1345 raise error.PushRaced('repository changed while %s - '
1352 raise error.PushRaced('repository changed while %s - '
1346 'please try again' % context)
1353 'please try again' % context)
1347
1354
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    wlock = lock = tr = None
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        # HTTP pushes always capture output so it can be relayed to the
        # client inside the bundle2 reply
        captureoutput = True
    try:
        # abort early (PushRaced) if the repo heads moved since the client
        # built the bundle
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 path: having a 'params' attribute distinguishes a
            # bundle2 unbundler from a plain changegroup
            r = None
            try:
                wlock = repo.wlock()
                lock = repo.lock()
                tr = repo.transaction(source)
                # hook arguments advertised to transaction hooks
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                # the lambda defers transaction access so parts are processed
                # against the live transaction object
                op = bundle2.bundleoperation(repo, lambda: tr,
                                             captureoutput=captureoutput)
                try:
                    r = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # always expose the bundle2 reply, even on failure, so
                    # output parts can still reach the client
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                tr.close()
            except BaseException, exc:
                # tag the exception so upper layers know it happened during
                # bundle2 processing and can salvage partial replies
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # legacy changegroup (bundle1) path: a plain repo lock suffices
            lock = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        # release in reverse-acquisition order; tr.release aborts an
        # uncommitted transaction
        lockmod.release(tr, lock, wlock)
        if recordout is not None:
            # flush any captured output into the reply bundle
            recordout(repo.ui.popbuffer())
    return r
1405
1412
1406 # This is it's own function so extensions can override it.
1413 # This is it's own function so extensions can override it.
1407 def _walkstreamfiles(repo):
1414 def _walkstreamfiles(repo):
1408 return repo.store.walk()
1415 return repo.store.walk()
1409
1416
def generatestreamclone(repo):
    """Emit content for a streaming clone.

    This is a generator of raw chunks that constitute a streaming clone.

    The stream begins with a line of 2 space-delimited integers containing the
    number of entries and total bytes size.

    Next, are N entries for each file being transferred. Each file entry starts
    as a line with the file name and integer size delimited by a null byte.
    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                # zero-length files are skipped entirely; they never appear
                # in the stream
                entries.append((name, size))
                total_bytes += size
    finally:
        lock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    # header line: "<entry count> <total size>\n"
    yield '%d %d\n' % (len(entries), total_bytes)

    sopener = repo.svfs
    oldaudit = sopener.mustaudit
    debugflag = repo.ui.debugflag
    # names come from the store walk above; disable per-open path auditing
    # while streaming, restoring the previous setting afterwards
    sopener.mustaudit = False

    try:
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            if size <= 65536:
                # small files are read and yielded in one shot
                fp = sopener(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                # larger files are streamed in chunks to bound memory use
                for chunk in util.filechunkiter(sopener(name), limit=size):
                    yield chunk
    finally:
        sopener.mustaudit = oldaudit
1470
1477
def consumestreamclone(repo, fp):
    """Apply the contents from a streaming clone file.

    This takes the output from "streamout" and applies it to the specified
    repository.

    Like "streamout," the status line added by the wire protocol is not handled
    by this function.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        # header line: "<file count> <total size>"
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        # a transaction wraps the whole apply so a partial stream can be
        # rolled back
        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # per-file entry line: "<name>\0<size>"
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            # guard against a zero/negative wall-clock delta so the
            # rate computation below cannot divide by zero
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now