##// END OF EJS Templates
bundle2: pull bookmark the old way if no bundle2 listkeys support (issue4701)...
Pierre-Yves David -
r25479:f00a63a4 default
parent child Browse files
Show More
@@ -1,1543 +1,1547 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import time
8 import time
9 from i18n import _
9 from i18n import _
10 from node import hex, nullid
10 from node import hex, nullid
11 import errno, urllib
11 import errno, urllib
12 import util, scmutil, changegroup, base85, error, store
12 import util, scmutil, changegroup, base85, error, store
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
14 import lock as lockmod
14 import lock as lockmod
15 import tags
15 import tags
16
16
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header of a bundle stream and return an unpacker.

    ``fname`` is only used for error messages; an empty name is reported
    as "stream".  When ``vfs`` is given, the reported name is joined onto
    the vfs root.  Raises ``util.Abort`` for non-HG data or an unknown
    bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # headerless changegroup: push the sniffed bytes back and treat
        # the stream as an uncompressed HG10 bundle
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # compression algorithm follows the header for v1 bundles
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
42
42
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    # pick a marker version understood by both sides
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
57
57
58 def _canusebundle2(op):
58 def _canusebundle2(op):
59 """return true if a pull/push can use bundle2
59 """return true if a pull/push can use bundle2
60
60
61 Feel free to nuke this function when we drop the experimental option"""
61 Feel free to nuke this function when we drop the experimental option"""
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
63 and op.remote.capable('bundle2'))
63 and op.remote.capable('bundle2'))
64
64
65
65
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been done through bundle2 already)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(ctx.node() for ctx in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads

    # mapping of (success, failure) messages used when pushing bookmarks
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
174
174
175
175
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing to a local peer: make sure it understands our requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction manager is only useful when we may write back
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # each step below is a no-op when bundle2 already performed it
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
256
256
# ordered list of discovery step names to run before a push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
264
264
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to
    the list of steps.  Beware that decorated functions will be added in
    order (this may matter).

    You can only use this decorator for a new step, if you want to wrap a
    step from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        # refuse to silently overwrite an already-registered step
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
280
280
def _pushdiscovery(pushop):
    """Run all registered discovery steps, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
286
286
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
299
299
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
348
348
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed heads"""
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repo.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
359
359
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks must be updated, added, overwritten or
    deleted on the remote, and record them in ``pushop.outbookmarks``"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only move bookmarks pointing into the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
410
410
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push.  Unless ``--force`` was
    given, aborts when an outgoing head is obsolete/troubled or when the
    push would create new remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
445
445
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
453
453
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to
    the list of steps.  Beware that decorated functions will be added in
    order (this may matter).  An explicit ``idx`` inserts the step at that
    position instead of appending it.

    You can only use this decorator for new steps, if you want to wrap a
    step from an extension, attack the b2partsgenmapping dictionary
    directly."""
    def dec(func):
        # refuse to silently overwrite an already-registered part generator
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
472
472
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    remotecaps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = remotecaps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # keep only the versions both sides can speak, use the newest one
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
513
513
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One pushkey part is added per head that must become public on the remote.
    The returned reply handler warns about every head the server ignored or
    failed to publish.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # fixed: use the idiomatic ``not in`` (was ``not 'pushkey' in b2caps``),
    # matching the test already used in _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        # remember which part carried which head for the reply handler
        part2node.append((part.id, newremotehead))

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
545
545
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence markers part to the outgoing bundle2

    Skipped when the step already ran or when no marker wire-format version
    is common to both sides.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        tosend = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, tosend)
557
557
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One pushkey part is added per outgoing bookmark. The returned reply
    handler reports the outcome of each change ('update', 'export' for a new
    bookmark, 'delete' for a removed one) to the user.

    (docstring fixed: it used to say "handle phase push", copy-pasted from
    _pushb2phases)
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
600
600
601
601
602 def _pushbundle2(pushop):
602 def _pushbundle2(pushop):
603 """push data to the remote using bundle2
603 """push data to the remote using bundle2
604
604
605 The only currently supported type of data is changegroup but this will
605 The only currently supported type of data is changegroup but this will
606 evolve in the future."""
606 evolve in the future."""
607 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
607 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
608 pushback = (pushop.trmanager
608 pushback = (pushop.trmanager
609 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
609 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
610
610
611 # create reply capability
611 # create reply capability
612 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
612 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
613 allowpushback=pushback))
613 allowpushback=pushback))
614 bundler.newpart('replycaps', data=capsblob)
614 bundler.newpart('replycaps', data=capsblob)
615 replyhandlers = []
615 replyhandlers = []
616 for partgenname in b2partsgenorder:
616 for partgenname in b2partsgenorder:
617 partgen = b2partsgenmapping[partgenname]
617 partgen = b2partsgenmapping[partgenname]
618 ret = partgen(pushop, bundler)
618 ret = partgen(pushop, bundler)
619 if callable(ret):
619 if callable(ret):
620 replyhandlers.append(ret)
620 replyhandlers.append(ret)
621 # do not push if nothing to push
621 # do not push if nothing to push
622 if bundler.nbparts <= 1:
622 if bundler.nbparts <= 1:
623 return
623 return
624 stream = util.chunkbuffer(bundler.getchunks())
624 stream = util.chunkbuffer(bundler.getchunks())
625 try:
625 try:
626 reply = pushop.remote.unbundle(stream, ['force'], 'push')
626 reply = pushop.remote.unbundle(stream, ['force'], 'push')
627 except error.BundleValueError, exc:
627 except error.BundleValueError, exc:
628 raise util.Abort('missing support for %s' % exc)
628 raise util.Abort('missing support for %s' % exc)
629 try:
629 try:
630 trgetter = None
630 trgetter = None
631 if pushback:
631 if pushback:
632 trgetter = pushop.trmanager.transaction
632 trgetter = pushop.trmanager.transaction
633 op = bundle2.processbundle(pushop.repo, reply, trgetter)
633 op = bundle2.processbundle(pushop.repo, reply, trgetter)
634 except error.BundleValueError, exc:
634 except error.BundleValueError, exc:
635 raise util.Abort('missing support for %s' % exc)
635 raise util.Abort('missing support for %s' % exc)
636 for rephand in replyhandlers:
636 for rephand in replyhandlers:
637 rephand(op)
637 rephand(op)
638
638
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
687
687
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
743
743
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
760
760
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        # warn once if any of the pushkey calls failed
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
779
779
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery can have set the value form invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
801
801
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.

    (docstring grammar fixed: "A object that represent", "It purpose is",
    "A new should be created")
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
859
859
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily created; None until transaction() is first called
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
889
889
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None):
    """pull changesets (and related data) from ``remote`` into ``repo``

    Runs discovery then each pull step (bundle2 when possible, changesets,
    phases, bookmarks, obsmarkers) under the repo lock inside one
    transaction. Returns the pulloperation carrying the results."""
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           **opargs)
    if pullop.remote.local():
        # refuse to pull from a local repo whose requirements we cannot honor
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
919
919
# Ordered list of the discovery step names run before a pull.
pulldiscoveryorder = []

# Mapping between a discovery step name and the function implementing it.
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
927
927
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is recorded in the step -> function mapping and
    appended to the ordered list of steps. Beware that decorated functions
    will be added in order (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        # refuse to silently replace an already-registered step
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
943
943
944 def _pulldiscovery(pullop):
944 def _pulldiscovery(pullop):
945 """Run all discovery steps"""
945 """Run all discovery steps"""
946 for stepname in pulldiscoveryorder:
946 for stepname in pulldiscoveryorder:
947 step = pulldiscoverymapping[stepname]
947 step = pulldiscoverymapping[stepname]
948 step(pullop)
948 step(pullop)
949
949
950 @pulldiscovery('b1:bookmarks')
950 @pulldiscovery('b1:bookmarks')
951 def _pullbookmarkbundle1(pullop):
951 def _pullbookmarkbundle1(pullop):
952 """fetch bookmark data in bundle1 case
952 """fetch bookmark data in bundle1 case
953
953
954 If not using bundle2, we have to fetch bookmarks before changeset
954 If not using bundle2, we have to fetch bookmarks before changeset
955 discovery to reduce the chance and impact of race conditions."""
955 discovery to reduce the chance and impact of race conditions."""
956 if pullop.remotebookmarks is not None:
956 if pullop.remotebookmarks is not None:
957 return
957 return
958 if not _canusebundle2(pullop): # all bundle2 server now support listkeys
958 if (_canusebundle2(pullop)
959 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
959 and 'listkeys' in bundle2.bundle2caps(pullop.remote)):
960 # all known bundle2 servers now support listkeys, but lets be nice with
961 # new implementation.
962 return
963 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
960
964
961
965
962 @pulldiscovery('changegroup')
966 @pulldiscovery('changegroup')
963 def _pulldiscoverychangegroup(pullop):
967 def _pulldiscoverychangegroup(pullop):
964 """discovery phase for the pull
968 """discovery phase for the pull
965
969
966 Current handle changeset discovery only, will change handle all discovery
970 Current handle changeset discovery only, will change handle all discovery
967 at some point."""
971 at some point."""
968 tmp = discovery.findcommonincoming(pullop.repo,
972 tmp = discovery.findcommonincoming(pullop.repo,
969 pullop.remote,
973 pullop.remote,
970 heads=pullop.heads,
974 heads=pullop.heads,
971 force=pullop.force)
975 force=pullop.force)
972 common, fetch, rheads = tmp
976 common, fetch, rheads = tmp
973 nm = pullop.repo.unfiltered().changelog.nodemap
977 nm = pullop.repo.unfiltered().changelog.nodemap
974 if fetch and rheads:
978 if fetch and rheads:
975 # If a remote heads in filtered locally, lets drop it from the unknown
979 # If a remote heads in filtered locally, lets drop it from the unknown
976 # remote heads and put in back in common.
980 # remote heads and put in back in common.
977 #
981 #
978 # This is a hackish solution to catch most of "common but locally
982 # This is a hackish solution to catch most of "common but locally
979 # hidden situation". We do not performs discovery on unfiltered
983 # hidden situation". We do not performs discovery on unfiltered
980 # repository because it end up doing a pathological amount of round
984 # repository because it end up doing a pathological amount of round
981 # trip for w huge amount of changeset we do not care about.
985 # trip for w huge amount of changeset we do not care about.
982 #
986 #
983 # If a set of such "common but filtered" changeset exist on the server
987 # If a set of such "common but filtered" changeset exist on the server
984 # but are not including a remote heads, we'll not be able to detect it,
988 # but are not including a remote heads, we'll not be able to detect it,
985 scommon = set(common)
989 scommon = set(common)
986 filteredrheads = []
990 filteredrheads = []
987 for n in rheads:
991 for n in rheads:
988 if n in nm:
992 if n in nm:
989 if n not in scommon:
993 if n not in scommon:
990 common.append(n)
994 common.append(n)
991 else:
995 else:
992 filteredrheads.append(n)
996 filteredrheads.append(n)
993 if not filteredrheads:
997 if not filteredrheads:
994 fetch = []
998 fetch = []
995 rheads = filteredrheads
999 rheads = filteredrheads
996 pullop.common = common
1000 pullop.common = common
997 pullop.fetch = fetch
1001 pullop.fetch = fetch
998 pullop.rheads = rheads
1002 pullop.rheads = rheads
999
1003
1000 def _pullbundle2(pullop):
1004 def _pullbundle2(pullop):
1001 """pull data using bundle2
1005 """pull data using bundle2
1002
1006
1003 For now, the only supported data are changegroup."""
1007 For now, the only supported data are changegroup."""
1004 remotecaps = bundle2.bundle2caps(pullop.remote)
1008 remotecaps = bundle2.bundle2caps(pullop.remote)
1005 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1009 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1006 # pulling changegroup
1010 # pulling changegroup
1007 pullop.stepsdone.add('changegroup')
1011 pullop.stepsdone.add('changegroup')
1008
1012
1009 kwargs['common'] = pullop.common
1013 kwargs['common'] = pullop.common
1010 kwargs['heads'] = pullop.heads or pullop.rheads
1014 kwargs['heads'] = pullop.heads or pullop.rheads
1011 kwargs['cg'] = pullop.fetch
1015 kwargs['cg'] = pullop.fetch
1012 if 'listkeys' in remotecaps:
1016 if 'listkeys' in remotecaps:
1013 kwargs['listkeys'] = ['phase']
1017 kwargs['listkeys'] = ['phase']
1014 if pullop.remotebookmarks is None:
1018 if pullop.remotebookmarks is None:
1015 # make sure to always includes bookmark data when migrating
1019 # make sure to always includes bookmark data when migrating
1016 # `hg incoming --bundle` to using this function.
1020 # `hg incoming --bundle` to using this function.
1017 kwargs['listkeys'].append('bookmarks')
1021 kwargs['listkeys'].append('bookmarks')
1018 if not pullop.fetch:
1022 if not pullop.fetch:
1019 pullop.repo.ui.status(_("no changes found\n"))
1023 pullop.repo.ui.status(_("no changes found\n"))
1020 pullop.cgresult = 0
1024 pullop.cgresult = 0
1021 else:
1025 else:
1022 if pullop.heads is None and list(pullop.common) == [nullid]:
1026 if pullop.heads is None and list(pullop.common) == [nullid]:
1023 pullop.repo.ui.status(_("requesting all changes\n"))
1027 pullop.repo.ui.status(_("requesting all changes\n"))
1024 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1028 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1025 remoteversions = bundle2.obsmarkersversion(remotecaps)
1029 remoteversions = bundle2.obsmarkersversion(remotecaps)
1026 if obsolete.commonversion(remoteversions) is not None:
1030 if obsolete.commonversion(remoteversions) is not None:
1027 kwargs['obsmarkers'] = True
1031 kwargs['obsmarkers'] = True
1028 pullop.stepsdone.add('obsmarkers')
1032 pullop.stepsdone.add('obsmarkers')
1029 _pullbundle2extraprepare(pullop, kwargs)
1033 _pullbundle2extraprepare(pullop, kwargs)
1030 bundle = pullop.remote.getbundle('pull', **kwargs)
1034 bundle = pullop.remote.getbundle('pull', **kwargs)
1031 try:
1035 try:
1032 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1036 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1033 except error.BundleValueError, exc:
1037 except error.BundleValueError, exc:
1034 raise util.Abort('missing support for %s' % exc)
1038 raise util.Abort('missing support for %s' % exc)
1035
1039
1036 if pullop.fetch:
1040 if pullop.fetch:
1037 results = [cg['return'] for cg in op.records['changegroup']]
1041 results = [cg['return'] for cg in op.records['changegroup']]
1038 pullop.cgresult = changegroup.combineresults(results)
1042 pullop.cgresult = changegroup.combineresults(results)
1039
1043
1040 # processing phases change
1044 # processing phases change
1041 for namespace, value in op.records['listkeys']:
1045 for namespace, value in op.records['listkeys']:
1042 if namespace == 'phases':
1046 if namespace == 'phases':
1043 _pullapplyphases(pullop, value)
1047 _pullapplyphases(pullop, value)
1044
1048
1045 # processing bookmark update
1049 # processing bookmark update
1046 for namespace, value in op.records['listkeys']:
1050 for namespace, value in op.records['listkeys']:
1047 if namespace == 'bookmarks':
1051 if namespace == 'bookmarks':
1048 pullop.remotebookmarks = value
1052 pullop.remotebookmarks = value
1049
1053
1050 # bookmark data were either already there or pulled in the bundle
1054 # bookmark data were either already there or pulled in the bundle
1051 if pullop.remotebookmarks is not None:
1055 if pullop.remotebookmarks is not None:
1052 _pullbookmarks(pullop)
1056 _pullbookmarks(pullop)
1053
1057
1054 def _pullbundle2extraprepare(pullop, kwargs):
1058 def _pullbundle2extraprepare(pullop, kwargs):
1055 """hook function so that extensions can extend the getbundle call"""
1059 """hook function so that extensions can extend the getbundle call"""
1056 pass
1060 pass
1057
1061
1058 def _pullchangeset(pullop):
1062 def _pullchangeset(pullop):
1059 """pull changeset from unbundle into the local repo"""
1063 """pull changeset from unbundle into the local repo"""
1060 # We delay the open of the transaction as late as possible so we
1064 # We delay the open of the transaction as late as possible so we
1061 # don't open transaction for nothing or you break future useful
1065 # don't open transaction for nothing or you break future useful
1062 # rollback call
1066 # rollback call
1063 if 'changegroup' in pullop.stepsdone:
1067 if 'changegroup' in pullop.stepsdone:
1064 return
1068 return
1065 pullop.stepsdone.add('changegroup')
1069 pullop.stepsdone.add('changegroup')
1066 if not pullop.fetch:
1070 if not pullop.fetch:
1067 pullop.repo.ui.status(_("no changes found\n"))
1071 pullop.repo.ui.status(_("no changes found\n"))
1068 pullop.cgresult = 0
1072 pullop.cgresult = 0
1069 return
1073 return
1070 pullop.gettransaction()
1074 pullop.gettransaction()
1071 if pullop.heads is None and list(pullop.common) == [nullid]:
1075 if pullop.heads is None and list(pullop.common) == [nullid]:
1072 pullop.repo.ui.status(_("requesting all changes\n"))
1076 pullop.repo.ui.status(_("requesting all changes\n"))
1073 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1077 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1074 # issue1320, avoid a race if remote changed after discovery
1078 # issue1320, avoid a race if remote changed after discovery
1075 pullop.heads = pullop.rheads
1079 pullop.heads = pullop.rheads
1076
1080
1077 if pullop.remote.capable('getbundle'):
1081 if pullop.remote.capable('getbundle'):
1078 # TODO: get bundlecaps from remote
1082 # TODO: get bundlecaps from remote
1079 cg = pullop.remote.getbundle('pull', common=pullop.common,
1083 cg = pullop.remote.getbundle('pull', common=pullop.common,
1080 heads=pullop.heads or pullop.rheads)
1084 heads=pullop.heads or pullop.rheads)
1081 elif pullop.heads is None:
1085 elif pullop.heads is None:
1082 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1086 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1083 elif not pullop.remote.capable('changegroupsubset'):
1087 elif not pullop.remote.capable('changegroupsubset'):
1084 raise util.Abort(_("partial pull cannot be done because "
1088 raise util.Abort(_("partial pull cannot be done because "
1085 "other repository doesn't support "
1089 "other repository doesn't support "
1086 "changegroupsubset."))
1090 "changegroupsubset."))
1087 else:
1091 else:
1088 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1092 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1089 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1093 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1090 pullop.remote.url())
1094 pullop.remote.url())
1091
1095
1092 def _pullphase(pullop):
1096 def _pullphase(pullop):
1093 # Get remote phases data from remote
1097 # Get remote phases data from remote
1094 if 'phases' in pullop.stepsdone:
1098 if 'phases' in pullop.stepsdone:
1095 return
1099 return
1096 remotephases = pullop.remote.listkeys('phases')
1100 remotephases = pullop.remote.listkeys('phases')
1097 _pullapplyphases(pullop, remotephases)
1101 _pullapplyphases(pullop, remotephases)
1098
1102
1099 def _pullapplyphases(pullop, remotephases):
1103 def _pullapplyphases(pullop, remotephases):
1100 """apply phase movement from observed remote state"""
1104 """apply phase movement from observed remote state"""
1101 if 'phases' in pullop.stepsdone:
1105 if 'phases' in pullop.stepsdone:
1102 return
1106 return
1103 pullop.stepsdone.add('phases')
1107 pullop.stepsdone.add('phases')
1104 publishing = bool(remotephases.get('publishing', False))
1108 publishing = bool(remotephases.get('publishing', False))
1105 if remotephases and not publishing:
1109 if remotephases and not publishing:
1106 # remote is new and unpublishing
1110 # remote is new and unpublishing
1107 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1111 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1108 pullop.pulledsubset,
1112 pullop.pulledsubset,
1109 remotephases)
1113 remotephases)
1110 dheads = pullop.pulledsubset
1114 dheads = pullop.pulledsubset
1111 else:
1115 else:
1112 # Remote is old or publishing all common changesets
1116 # Remote is old or publishing all common changesets
1113 # should be seen as public
1117 # should be seen as public
1114 pheads = pullop.pulledsubset
1118 pheads = pullop.pulledsubset
1115 dheads = []
1119 dheads = []
1116 unfi = pullop.repo.unfiltered()
1120 unfi = pullop.repo.unfiltered()
1117 phase = unfi._phasecache.phase
1121 phase = unfi._phasecache.phase
1118 rev = unfi.changelog.nodemap.get
1122 rev = unfi.changelog.nodemap.get
1119 public = phases.public
1123 public = phases.public
1120 draft = phases.draft
1124 draft = phases.draft
1121
1125
1122 # exclude changesets already public locally and update the others
1126 # exclude changesets already public locally and update the others
1123 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1127 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1124 if pheads:
1128 if pheads:
1125 tr = pullop.gettransaction()
1129 tr = pullop.gettransaction()
1126 phases.advanceboundary(pullop.repo, tr, public, pheads)
1130 phases.advanceboundary(pullop.repo, tr, public, pheads)
1127
1131
1128 # exclude changesets already draft locally and update the others
1132 # exclude changesets already draft locally and update the others
1129 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1133 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1130 if dheads:
1134 if dheads:
1131 tr = pullop.gettransaction()
1135 tr = pullop.gettransaction()
1132 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1136 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1133
1137
1134 def _pullbookmarks(pullop):
1138 def _pullbookmarks(pullop):
1135 """process the remote bookmark information to update the local one"""
1139 """process the remote bookmark information to update the local one"""
1136 if 'bookmarks' in pullop.stepsdone:
1140 if 'bookmarks' in pullop.stepsdone:
1137 return
1141 return
1138 pullop.stepsdone.add('bookmarks')
1142 pullop.stepsdone.add('bookmarks')
1139 repo = pullop.repo
1143 repo = pullop.repo
1140 remotebookmarks = pullop.remotebookmarks
1144 remotebookmarks = pullop.remotebookmarks
1141 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1145 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1142 pullop.remote.url(),
1146 pullop.remote.url(),
1143 pullop.gettransaction,
1147 pullop.gettransaction,
1144 explicit=pullop.explicitbookmarks)
1148 explicit=pullop.explicitbookmarks)
1145
1149
1146 def _pullobsolete(pullop):
1150 def _pullobsolete(pullop):
1147 """utility function to pull obsolete markers from a remote
1151 """utility function to pull obsolete markers from a remote
1148
1152
1149 The `gettransaction` is function that return the pull transaction, creating
1153 The `gettransaction` is function that return the pull transaction, creating
1150 one if necessary. We return the transaction to inform the calling code that
1154 one if necessary. We return the transaction to inform the calling code that
1151 a new transaction have been created (when applicable).
1155 a new transaction have been created (when applicable).
1152
1156
1153 Exists mostly to allow overriding for experimentation purpose"""
1157 Exists mostly to allow overriding for experimentation purpose"""
1154 if 'obsmarkers' in pullop.stepsdone:
1158 if 'obsmarkers' in pullop.stepsdone:
1155 return
1159 return
1156 pullop.stepsdone.add('obsmarkers')
1160 pullop.stepsdone.add('obsmarkers')
1157 tr = None
1161 tr = None
1158 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1162 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1159 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1163 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1160 remoteobs = pullop.remote.listkeys('obsolete')
1164 remoteobs = pullop.remote.listkeys('obsolete')
1161 if 'dump0' in remoteobs:
1165 if 'dump0' in remoteobs:
1162 tr = pullop.gettransaction()
1166 tr = pullop.gettransaction()
1163 for key in sorted(remoteobs, reverse=True):
1167 for key in sorted(remoteobs, reverse=True):
1164 if key.startswith('dump'):
1168 if key.startswith('dump'):
1165 data = base85.b85decode(remoteobs[key])
1169 data = base85.b85decode(remoteobs[key])
1166 pullop.repo.obsstore.mergemarkers(tr, data)
1170 pullop.repo.obsstore.mergemarkers(tr, data)
1167 pullop.repo.invalidatevolatilesets()
1171 pullop.repo.invalidatevolatilesets()
1168 return tr
1172 return tr
1169
1173
1170 def caps20to10(repo):
1174 def caps20to10(repo):
1171 """return a set with appropriate options to use bundle20 during getbundle"""
1175 """return a set with appropriate options to use bundle20 during getbundle"""
1172 caps = set(['HG20'])
1176 caps = set(['HG20'])
1173 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1177 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1174 caps.add('bundle2=' + urllib.quote(capsblob))
1178 caps.add('bundle2=' + urllib.quote(capsblob))
1175 return caps
1179 return caps
1176
1180
1177 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1181 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1178 getbundle2partsorder = []
1182 getbundle2partsorder = []
1179
1183
1180 # Mapping between step name and function
1184 # Mapping between step name and function
1181 #
1185 #
1182 # This exists to help extensions wrap steps if necessary
1186 # This exists to help extensions wrap steps if necessary
1183 getbundle2partsmapping = {}
1187 getbundle2partsmapping = {}
1184
1188
1185 def getbundle2partsgenerator(stepname, idx=None):
1189 def getbundle2partsgenerator(stepname, idx=None):
1186 """decorator for function generating bundle2 part for getbundle
1190 """decorator for function generating bundle2 part for getbundle
1187
1191
1188 The function is added to the step -> function mapping and appended to the
1192 The function is added to the step -> function mapping and appended to the
1189 list of steps. Beware that decorated functions will be added in order
1193 list of steps. Beware that decorated functions will be added in order
1190 (this may matter).
1194 (this may matter).
1191
1195
1192 You can only use this decorator for new steps, if you want to wrap a step
1196 You can only use this decorator for new steps, if you want to wrap a step
1193 from an extension, attack the getbundle2partsmapping dictionary directly."""
1197 from an extension, attack the getbundle2partsmapping dictionary directly."""
1194 def dec(func):
1198 def dec(func):
1195 assert stepname not in getbundle2partsmapping
1199 assert stepname not in getbundle2partsmapping
1196 getbundle2partsmapping[stepname] = func
1200 getbundle2partsmapping[stepname] = func
1197 if idx is None:
1201 if idx is None:
1198 getbundle2partsorder.append(stepname)
1202 getbundle2partsorder.append(stepname)
1199 else:
1203 else:
1200 getbundle2partsorder.insert(idx, stepname)
1204 getbundle2partsorder.insert(idx, stepname)
1201 return func
1205 return func
1202 return dec
1206 return dec
1203
1207
1204 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1208 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1205 **kwargs):
1209 **kwargs):
1206 """return a full bundle (with potentially multiple kind of parts)
1210 """return a full bundle (with potentially multiple kind of parts)
1207
1211
1208 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1212 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1209 passed. For now, the bundle can contain only changegroup, but this will
1213 passed. For now, the bundle can contain only changegroup, but this will
1210 changes when more part type will be available for bundle2.
1214 changes when more part type will be available for bundle2.
1211
1215
1212 This is different from changegroup.getchangegroup that only returns an HG10
1216 This is different from changegroup.getchangegroup that only returns an HG10
1213 changegroup bundle. They may eventually get reunited in the future when we
1217 changegroup bundle. They may eventually get reunited in the future when we
1214 have a clearer idea of the API we what to query different data.
1218 have a clearer idea of the API we what to query different data.
1215
1219
1216 The implementation is at a very early stage and will get massive rework
1220 The implementation is at a very early stage and will get massive rework
1217 when the API of bundle is refined.
1221 when the API of bundle is refined.
1218 """
1222 """
1219 # bundle10 case
1223 # bundle10 case
1220 usebundle2 = False
1224 usebundle2 = False
1221 if bundlecaps is not None:
1225 if bundlecaps is not None:
1222 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1226 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1223 if not usebundle2:
1227 if not usebundle2:
1224 if bundlecaps and not kwargs.get('cg', True):
1228 if bundlecaps and not kwargs.get('cg', True):
1225 raise ValueError(_('request for bundle10 must include changegroup'))
1229 raise ValueError(_('request for bundle10 must include changegroup'))
1226
1230
1227 if kwargs:
1231 if kwargs:
1228 raise ValueError(_('unsupported getbundle arguments: %s')
1232 raise ValueError(_('unsupported getbundle arguments: %s')
1229 % ', '.join(sorted(kwargs.keys())))
1233 % ', '.join(sorted(kwargs.keys())))
1230 return changegroup.getchangegroup(repo, source, heads=heads,
1234 return changegroup.getchangegroup(repo, source, heads=heads,
1231 common=common, bundlecaps=bundlecaps)
1235 common=common, bundlecaps=bundlecaps)
1232
1236
1233 # bundle20 case
1237 # bundle20 case
1234 b2caps = {}
1238 b2caps = {}
1235 for bcaps in bundlecaps:
1239 for bcaps in bundlecaps:
1236 if bcaps.startswith('bundle2='):
1240 if bcaps.startswith('bundle2='):
1237 blob = urllib.unquote(bcaps[len('bundle2='):])
1241 blob = urllib.unquote(bcaps[len('bundle2='):])
1238 b2caps.update(bundle2.decodecaps(blob))
1242 b2caps.update(bundle2.decodecaps(blob))
1239 bundler = bundle2.bundle20(repo.ui, b2caps)
1243 bundler = bundle2.bundle20(repo.ui, b2caps)
1240
1244
1241 kwargs['heads'] = heads
1245 kwargs['heads'] = heads
1242 kwargs['common'] = common
1246 kwargs['common'] = common
1243
1247
1244 for name in getbundle2partsorder:
1248 for name in getbundle2partsorder:
1245 func = getbundle2partsmapping[name]
1249 func = getbundle2partsmapping[name]
1246 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1250 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1247 **kwargs)
1251 **kwargs)
1248
1252
1249 return util.chunkbuffer(bundler.getchunks())
1253 return util.chunkbuffer(bundler.getchunks())
1250
1254
1251 @getbundle2partsgenerator('changegroup')
1255 @getbundle2partsgenerator('changegroup')
1252 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1256 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1253 b2caps=None, heads=None, common=None, **kwargs):
1257 b2caps=None, heads=None, common=None, **kwargs):
1254 """add a changegroup part to the requested bundle"""
1258 """add a changegroup part to the requested bundle"""
1255 cg = None
1259 cg = None
1256 if kwargs.get('cg', True):
1260 if kwargs.get('cg', True):
1257 # build changegroup bundle here.
1261 # build changegroup bundle here.
1258 version = None
1262 version = None
1259 cgversions = b2caps.get('changegroup')
1263 cgversions = b2caps.get('changegroup')
1260 if not cgversions: # 3.1 and 3.2 ship with an empty value
1264 if not cgversions: # 3.1 and 3.2 ship with an empty value
1261 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1265 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1262 common=common,
1266 common=common,
1263 bundlecaps=bundlecaps)
1267 bundlecaps=bundlecaps)
1264 else:
1268 else:
1265 cgversions = [v for v in cgversions if v in changegroup.packermap]
1269 cgversions = [v for v in cgversions if v in changegroup.packermap]
1266 if not cgversions:
1270 if not cgversions:
1267 raise ValueError(_('no common changegroup version'))
1271 raise ValueError(_('no common changegroup version'))
1268 version = max(cgversions)
1272 version = max(cgversions)
1269 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1273 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1270 common=common,
1274 common=common,
1271 bundlecaps=bundlecaps,
1275 bundlecaps=bundlecaps,
1272 version=version)
1276 version=version)
1273
1277
1274 if cg:
1278 if cg:
1275 part = bundler.newpart('changegroup', data=cg)
1279 part = bundler.newpart('changegroup', data=cg)
1276 if version is not None:
1280 if version is not None:
1277 part.addparam('version', version)
1281 part.addparam('version', version)
1278
1282
1279 @getbundle2partsgenerator('listkeys')
1283 @getbundle2partsgenerator('listkeys')
1280 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1284 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1281 b2caps=None, **kwargs):
1285 b2caps=None, **kwargs):
1282 """add parts containing listkeys namespaces to the requested bundle"""
1286 """add parts containing listkeys namespaces to the requested bundle"""
1283 listkeys = kwargs.get('listkeys', ())
1287 listkeys = kwargs.get('listkeys', ())
1284 for namespace in listkeys:
1288 for namespace in listkeys:
1285 part = bundler.newpart('listkeys')
1289 part = bundler.newpart('listkeys')
1286 part.addparam('namespace', namespace)
1290 part.addparam('namespace', namespace)
1287 keys = repo.listkeys(namespace).items()
1291 keys = repo.listkeys(namespace).items()
1288 part.data = pushkey.encodekeys(keys)
1292 part.data = pushkey.encodekeys(keys)
1289
1293
1290 @getbundle2partsgenerator('obsmarkers')
1294 @getbundle2partsgenerator('obsmarkers')
1291 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1295 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1292 b2caps=None, heads=None, **kwargs):
1296 b2caps=None, heads=None, **kwargs):
1293 """add an obsolescence markers part to the requested bundle"""
1297 """add an obsolescence markers part to the requested bundle"""
1294 if kwargs.get('obsmarkers', False):
1298 if kwargs.get('obsmarkers', False):
1295 if heads is None:
1299 if heads is None:
1296 heads = repo.heads()
1300 heads = repo.heads()
1297 subset = [c.node() for c in repo.set('::%ln', heads)]
1301 subset = [c.node() for c in repo.set('::%ln', heads)]
1298 markers = repo.obsstore.relevantmarkers(subset)
1302 markers = repo.obsstore.relevantmarkers(subset)
1299 markers = sorted(markers)
1303 markers = sorted(markers)
1300 buildobsmarkerspart(bundler, markers)
1304 buildobsmarkerspart(bundler, markers)
1301
1305
1302 @getbundle2partsgenerator('hgtagsfnodes')
1306 @getbundle2partsgenerator('hgtagsfnodes')
1303 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1307 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1304 b2caps=None, heads=None, common=None,
1308 b2caps=None, heads=None, common=None,
1305 **kwargs):
1309 **kwargs):
1306 """Transfer the .hgtags filenodes mapping.
1310 """Transfer the .hgtags filenodes mapping.
1307
1311
1308 Only values for heads in this bundle will be transferred.
1312 Only values for heads in this bundle will be transferred.
1309
1313
1310 The part data consists of pairs of 20 byte changeset node and .hgtags
1314 The part data consists of pairs of 20 byte changeset node and .hgtags
1311 filenodes raw values.
1315 filenodes raw values.
1312 """
1316 """
1313 # Don't send unless:
1317 # Don't send unless:
1314 # - changeset are being exchanged,
1318 # - changeset are being exchanged,
1315 # - the client supports it.
1319 # - the client supports it.
1316 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1320 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1317 return
1321 return
1318
1322
1319 outgoing = changegroup.computeoutgoing(repo, heads, common)
1323 outgoing = changegroup.computeoutgoing(repo, heads, common)
1320
1324
1321 if not outgoing.missingheads:
1325 if not outgoing.missingheads:
1322 return
1326 return
1323
1327
1324 cache = tags.hgtagsfnodescache(repo.unfiltered())
1328 cache = tags.hgtagsfnodescache(repo.unfiltered())
1325 chunks = []
1329 chunks = []
1326
1330
1327 # .hgtags fnodes are only relevant for head changesets. While we could
1331 # .hgtags fnodes are only relevant for head changesets. While we could
1328 # transfer values for all known nodes, there will likely be little to
1332 # transfer values for all known nodes, there will likely be little to
1329 # no benefit.
1333 # no benefit.
1330 #
1334 #
1331 # We don't bother using a generator to produce output data because
1335 # We don't bother using a generator to produce output data because
1332 # a) we only have 40 bytes per head and even esoteric numbers of heads
1336 # a) we only have 40 bytes per head and even esoteric numbers of heads
1333 # consume little memory (1M heads is 40MB) b) we don't want to send the
1337 # consume little memory (1M heads is 40MB) b) we don't want to send the
1334 # part if we don't have entries and knowing if we have entries requires
1338 # part if we don't have entries and knowing if we have entries requires
1335 # cache lookups.
1339 # cache lookups.
1336 for node in outgoing.missingheads:
1340 for node in outgoing.missingheads:
1337 # Don't compute missing, as this may slow down serving.
1341 # Don't compute missing, as this may slow down serving.
1338 fnode = cache.getfnode(node, computemissing=False)
1342 fnode = cache.getfnode(node, computemissing=False)
1339 if fnode is not None:
1343 if fnode is not None:
1340 chunks.extend([node, fnode])
1344 chunks.extend([node, fnode])
1341
1345
1342 if chunks:
1346 if chunks:
1343 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1347 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1344
1348
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current_heads = repo.heads()
    current_digest = util.sha1(''.join(sorted(current_heads))).digest()
    # The remote may send the literal heads, a hash of them, or the
    # explicit 'force' marker; any of those makes the push acceptable.
    unchanged = (their_heads == ['force']
                 or their_heads == current_heads
                 or their_heads == ['hashed', current_digest])
    if unchanged:
        return
    # someone else committed/pushed/unbundled while we were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1358
1362
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised.
    """
    r = 0
    # need a transaction when processing a bundle2 stream
    wlock = lock = tr = None
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # Abort early (PushRaced) if the repo heads changed since the
        # bundle was created.
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # A 'params' attribute marks a bundle2 unbundler (plain bundle1
            # changegroups have none); bundle2 streams are processed inside
            # a transaction so partial application can be rolled back.
            r = None
            try:
                wlock = repo.wlock()
                lock = repo.lock()
                tr = repo.transaction(source)
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                op = bundle2.bundleoperation(repo, lambda: tr,
                                             captureoutput=captureoutput)
                try:
                    r = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # Always hand back the reply bundle, even on failure,
                    # so output parts produced so far reach the client.
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                tr.close()
            except BaseException, exc:
                # Tag the exception so callers know it happened while
                # processing bundle2 data, and salvage any output parts
                # already captured on the reply.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # Legacy (bundle1) path: apply the changegroup directly under
            # the store lock; no wlock/transaction management needed here.
            lock = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        # release() tolerates None entries, so this is safe whichever
        # branch (or failure point) we came from.
        lockmod.release(tr, lock, wlock)
        if recordout is not None:
            # Flush any buffered ui output into the reply/salvaged parts.
            recordout(repo.ui.popbuffer())
    return r
1416
1420
# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
    """Walk the store, yielding (name, encoded name, size) per file.

    Thin wrapper around ``repo.store.walk()`` kept separate so extensions
    can override which files are included in a streaming clone.
    """
    return repo.store.walk()
1420
1424
def generatestreamclone(repo):
    """Emit content for a streaming clone.

    This is a generator of raw chunks that constitute a streaming clone.

    The stream begins with a line of 2 space-delimited integers containing the
    number of entries and total bytes size.

    Next, are N entries for each file being transferred. Each file entry starts
    as a line with the file name and integer size delimited by a null byte.
    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        # _walkstreamfiles yields (name, encoded name, size); zero-length
        # files are skipped since they carry no data worth transferring.
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size
    finally:
        lock.release()
    # NOTE: the lock is released here — only the scan above runs locked;
    # the file contents below are streamed without holding the lock.

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    # Header line: "<number of entries> <total bytes>\n".
    yield '%d %d\n' % (len(entries), total_bytes)

    sopener = repo.svfs
    oldaudit = sopener.mustaudit
    debugflag = repo.ui.debugflag
    # Disable path auditing while streaming; the names come straight from
    # the store walk above (presumably trusted, so per-open audits would
    # only add overhead — verify if the walk can be extension-overridden).
    sopener.mustaudit = False

    try:
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            if size <= 65536:
                # Small file: read the whole content in one go.
                fp = sopener(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                # Large file: stream it in bounded chunks.
                for chunk in util.filechunkiter(sopener(name), limit=size):
                    yield chunk
    finally:
        # Restore the audit flag even if the consumer aborts mid-stream.
        sopener.mustaudit = oldaudit
1481
1485
def consumestreamclone(repo, fp):
    """Apply the contents from a streaming clone file.

    This takes the output from "streamout" and applies it to the specified
    repository.

    Like "streamout," the status line added by the wire protocol is not handled
    by this function.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        # First line of the stream: "<total files> <total bytes>".
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        # Apply everything inside a single transaction so a truncated or
        # corrupt stream can be rolled back.
        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # Per-file header line: "<name>\0<size>".
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                # Copy exactly `size` bytes of content in bounded chunks,
                # updating the progress bar as we go.
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        # Guard against a zero/negative interval so the rate below stays
        # finite (clock resolution / clock adjustments).
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now