##// END OF EJS Templates
pull: skip pulling remote bookmarks with bundle1 if a value already exists...
Pierre-Yves David -
r25443:443d3dec default
parent child Browse files
Show More
@@ -1,1530 +1,1532 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import time
8 import time
9 from i18n import _
9 from i18n import _
10 from node import hex, nullid
10 from node import hex, nullid
11 import errno, urllib
11 import errno, urllib
12 import util, scmutil, changegroup, base85, error, store
12 import util, scmutil, changegroup, base85, error, store
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
14 import lock as lockmod
14 import lock as lockmod
15 import tags
15 import tags
16
16
def readbundle(ui, fh, fname, vfs=None):
    """Identify the bundle stream ``fh`` and return a matching unbundler.

    The first four bytes are peeked to determine the format.  Headerless
    changegroup data (starting with a NUL byte) is fixed up and treated as
    an uncompressed HG10 bundle.  Raises util.Abort for non-Mercurial data
    or an unknown bundle version.
    """
    hdr = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
    if not hdr.startswith('HG') and hdr.startswith('\0'):
        # raw changegroup without a bundle header: wrap it so it reads
        # back as an uncompressed HG10 bundle
        fh = changegroup.headerlessfixup(fh, hdr)
        hdr = "HG10"
        compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = hdr[0:2], hdr[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # compression marker follows the header for HG10 bundles
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
42
42
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        # nothing to transfer, do not create an empty part
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
57
57
58 def _canusebundle2(op):
58 def _canusebundle2(op):
59 """return true if a pull/push can use bundle2
59 """return true if a pull/push can use bundle2
60
60
61 Feel free to nuke this function when we drop the experimental option"""
61 Feel free to nuke this function when we drop the experimental option"""
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
63 and op.remote.capable('bundle2'))
63 and op.remote.capable('bundle2'))
64
64
65
65
class pushoperation(object):
    """An object representing a single push operation.

    It carries push-related state and common helpers.  A fresh instance
    should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no push target, all common heads remain relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
174
174
175
175
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    # fix: use the "except ... as ..." form (PEP 3110); the old
    # comma form is Python-2-only syntax and breaks on Python 3
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
256
256
257 # list of steps to perform discovery before push
257 # list of steps to perform discovery before push
258 pushdiscoveryorder = []
258 pushdiscoveryorder = []
259
259
260 # Mapping between step name and function
260 # Mapping between step name and function
261 #
261 #
262 # This exists to help extensions wrap steps if necessary
262 # This exists to help extensions wrap steps if necessary
263 pushdiscoverymapping = {}
263 pushdiscoverymapping = {}
264
264
265 def pushdiscovery(stepname):
265 def pushdiscovery(stepname):
266 """decorator for function performing discovery before push
266 """decorator for function performing discovery before push
267
267
268 The function is added to the step -> function mapping and appended to the
268 The function is added to the step -> function mapping and appended to the
269 list of steps. Beware that decorated function will be added in order (this
269 list of steps. Beware that decorated function will be added in order (this
270 may matter).
270 may matter).
271
271
272 You can only use this decorator for a new step, if you want to wrap a step
272 You can only use this decorator for a new step, if you want to wrap a step
273 from an extension, change the pushdiscovery dictionary directly."""
273 from an extension, change the pushdiscovery dictionary directly."""
274 def dec(func):
274 def dec(func):
275 assert stepname not in pushdiscoverymapping
275 assert stepname not in pushdiscoverymapping
276 pushdiscoverymapping[stepname] = func
276 pushdiscoverymapping[stepname] = func
277 pushdiscoveryorder.append(stepname)
277 pushdiscoveryorder.append(stepname)
278 return func
278 return func
279 return dec
279 return dec
280
280
281 def _pushdiscovery(pushop):
281 def _pushdiscovery(pushop):
282 """Run all discovery steps"""
282 """Run all discovery steps"""
283 for stepname in pushdiscoveryorder:
283 for stepname in pushdiscoveryorder:
284 step = pushdiscoverymapping[stepname]
284 step = pushdiscoverymapping[stepname]
285 step(pushop)
285 step(pushop)
286
286
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
299
299
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    pheads, droots = phases.analyzeremotephases(pushop.repo,
                                                pushop.fallbackheads,
                                                remotephases)
    extracond = '' if publishing else ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
348
348
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect obsolescence markers relevant to the pushed set

    Only runs when marker exchange is enabled, the local obsstore is
    non-empty and the remote advertises obsolescence support."""
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    repo = pushop.repo
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
359
359
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compute the bookmark updates to send to the remote

    Fills pushop.outbookmarks with (name, remote id, local id) tuples and
    sets pushop.bkresult to 2 when an explicitly requested bookmark is
    unknown on both sides."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to ancestors of the pushed revs
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comparison = bookmod.compare(repo, repo._bookmarks, remotebookmark,
                                 srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comparison
    # bookmarks that advanced locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
410
410
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before pushing

    Returns False when there is nothing to push; otherwise aborts on
    obsolete/troubled heads (unless --force), runs the new-head checks
    and returns True."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                if ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
445
445
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator step

    The decorated function is recorded in the step -> function mapping.
    Without ``idx`` the step name is appended to the ordered step list;
    with ``idx`` it is inserted at that position.  Decoration order
    therefore may matter.

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
472
472
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """add a changegroup part to an outgoing bundle2

    The server's addchangegroup return value ends up stored in the
    ``pushop.cgresult`` attribute (via the returned reply handler).
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    repo = pushop.repo
    repo.prepushoutgoinghooks(repo,
                              pushop.remote,
                              pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    remotecaps = bundle2.bundle2caps(pushop.remote)
    cgversion = None
    supported = remotecaps.get('changegroup')
    if not supported: # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(repo, 'push',
                                                pushop.outgoing)
    else:
        # keep only the versions both sides can produce/consume
        known = [v for v in supported if v in changegroup.packermap]
        if not known:
            raise ValueError(_('no common changegroup version'))
        cgversion = max(known)
        cg = changegroup.getlocalchangegroupraw(repo, 'push',
                                                pushop.outgoing,
                                                version=cgversion)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversion is not None:
        cgpart.addparam('version', cgversion)
    def handlereply(op):
        """extract the addchangegroup result from the server reply"""
        replies = op.records.getreplies(cgpart.id)
        assert len(replies['changegroup']) == 1
        pushop.cgresult = replies['changegroup'][0]['return']
    return handlereply
513
513
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One pushkey part is added per head that must be turned public on the
    remote; the returned reply handler warns about any head the server
    ignored or refused to update.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # leave the step not-done so the plain pushkey fallback can run
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
545
545
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    # push obsolescence markers through bundle2; no reply handler is needed
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format shared with the remote: leave the step undone
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
557
557
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One pushkey part is added per outgoing bookmark; the returned reply
    handler reports each update's outcome to the user and normalises
    ``pushop.bkresult``.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify the update for the user-facing messages in bookmsgmap
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
600
600
601
601
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError as exc:
        # "as" form is equivalent on py2.6+ and also parses under py3
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError as exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
638
638
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    repo = pushop.repo
    repo.prepushoutgoinghooks(repo,
                              pushop.remote,
                              pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    fastpathok = (pushop.revs is None
                  and not outgoing.excluded
                  and not repo.changelog.filteredrevs)
    if fastpathok:
        # pushing everything: use the fast path, no race possible on push
        packer = changegroup.cg1packer(repo, bundlecaps)
        cg = changegroup.getsubset(repo,
                                   outgoing,
                                   packer,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        remoteheads = ['force'] if pushop.force else pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 repo.url())
    else:
        # we return an integer indicating remote head count change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       repo.url())
687
687
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    issubrepopush = pushop.ui.configbool('ui', '_usedassubrepo', False)
    if (issubrepopush
        and remotephases                    # server supports phases
        and pushop.cgresult is None         # nothing was pushed
        and remotephases.get('publishing', False)):
        # This is a subrepo push to a publishing, phase-aware remote and no
        # changeset was actually pushed: we may be in the issue 3871 case!
        # Drop the courtesy phase synchronisation that would otherwise
        # publish changesets that are possibly still draft on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        # align local phases only; there is no phase data to push
        _localphasemove(pushop, cheads)
    else:
        pheads, droots = phases.analyzeremotephases(pushop.repo, cheads,
                                                    remotephases)
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            ok = pushop.remote.pushkey('phases',
                                       newremotehead.hex(),
                                       str(phases.draft),
                                       str(phases.public))
            if not ok:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
743
743
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
760
760
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        results = []
        # reverse sort to ensure we end with dump0
        for key in sorted(remotedata, reverse=True):
            results.append(remote.pushkey('obsolete', key, '',
                                          remotedata[key]))
        if not all(results):
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
779
779
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # classify the update for the user-facing messages in bookmsgmap
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
801
801
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # We pulled a specific subset: sync on this subset
            return self.heads
        # We pulled everything possible: sync on everything common
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
858
858
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction, see transaction()
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
888
888
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and related data) from ``remote`` into ``repo``

    Returns the ``pulloperation`` object carrying the outcome of each step.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        unsupported = set(pullop.remote.requirements) - pullop.repo.supported
        if unsupported:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(unsupported)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # the remaining steps detect by themselves when bundle2 already
        # transferred their data and there is nothing left to do
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
915
915
916 # list of steps to perform discovery before pull
916 # list of steps to perform discovery before pull
917 pulldiscoveryorder = []
917 pulldiscoveryorder = []
918
918
919 # Mapping between step name and function
919 # Mapping between step name and function
920 #
920 #
921 # This exists to help extensions wrap steps if necessary
921 # This exists to help extensions wrap steps if necessary
922 pulldiscoverymapping = {}
922 pulldiscoverymapping = {}
923
923
def pulldiscovery(stepname):
    """decorator registering a function as a discovery step before pull

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list. Decoration order is
    registration order, so it may matter.

    Only use this decorator for a brand new step; to wrap an existing step
    from an extension, modify the pulldiscoverymapping dictionary directly."""
    def register(func):
        # a step name must be unique; registering twice is a programming error
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
939
939
def _pulldiscovery(pullop):
    """Run all registered discovery steps, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
945
945
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # a value already exists (e.g. supplied by the caller or an
        # extension); skip the remote round-trip rather than clobber it
        return
    if not _canusebundle2(pullop): # all bundle2 server now support listkeys
        pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
954
956
955
957
956 @pulldiscovery('changegroup')
958 @pulldiscovery('changegroup')
957 def _pulldiscoverychangegroup(pullop):
959 def _pulldiscoverychangegroup(pullop):
958 """discovery phase for the pull
960 """discovery phase for the pull
959
961
960 Current handle changeset discovery only, will change handle all discovery
962 Current handle changeset discovery only, will change handle all discovery
961 at some point."""
963 at some point."""
962 tmp = discovery.findcommonincoming(pullop.repo,
964 tmp = discovery.findcommonincoming(pullop.repo,
963 pullop.remote,
965 pullop.remote,
964 heads=pullop.heads,
966 heads=pullop.heads,
965 force=pullop.force)
967 force=pullop.force)
966 common, fetch, rheads = tmp
968 common, fetch, rheads = tmp
967 nm = pullop.repo.unfiltered().changelog.nodemap
969 nm = pullop.repo.unfiltered().changelog.nodemap
968 if fetch and rheads:
970 if fetch and rheads:
969 # If a remote heads in filtered locally, lets drop it from the unknown
971 # If a remote heads in filtered locally, lets drop it from the unknown
970 # remote heads and put in back in common.
972 # remote heads and put in back in common.
971 #
973 #
972 # This is a hackish solution to catch most of "common but locally
974 # This is a hackish solution to catch most of "common but locally
973 # hidden situation". We do not performs discovery on unfiltered
975 # hidden situation". We do not performs discovery on unfiltered
974 # repository because it end up doing a pathological amount of round
976 # repository because it end up doing a pathological amount of round
975 # trip for w huge amount of changeset we do not care about.
977 # trip for w huge amount of changeset we do not care about.
976 #
978 #
977 # If a set of such "common but filtered" changeset exist on the server
979 # If a set of such "common but filtered" changeset exist on the server
978 # but are not including a remote heads, we'll not be able to detect it,
980 # but are not including a remote heads, we'll not be able to detect it,
979 scommon = set(common)
981 scommon = set(common)
980 filteredrheads = []
982 filteredrheads = []
981 for n in rheads:
983 for n in rheads:
982 if n in nm:
984 if n in nm:
983 if n not in scommon:
985 if n not in scommon:
984 common.append(n)
986 common.append(n)
985 else:
987 else:
986 filteredrheads.append(n)
988 filteredrheads.append(n)
987 if not filteredrheads:
989 if not filteredrheads:
988 fetch = []
990 fetch = []
989 rheads = filteredrheads
991 rheads = filteredrheads
990 pullop.common = common
992 pullop.common = common
991 pullop.fetch = fetch
993 pullop.fetch = fetch
992 pullop.rheads = rheads
994 pullop.rheads = rheads
993
995
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        # also fetch phase and bookmark data in the same round-trip
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        # only request obsolescence markers if both sides share a version
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # give extensions a chance to amend the getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
    # _pullbookmarks is a no-op when the 'bookmarks' step is already done
    _pullbookmarks(pullop)
1040
1042
1041 def _pullbundle2extraprepare(pullop, kwargs):
1043 def _pullbundle2extraprepare(pullop, kwargs):
1042 """hook function so that extensions can extend the getbundle call"""
1044 """hook function so that extensions can extend the getbundle call"""
1043 pass
1045 pass
1044
1046
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Skipped entirely when the 'changegroup' step was already handled
    (e.g. by the bundle2 code path). Sets ``pullop.cgresult``."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the most capable transfer method the remote supports
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1078
1080
def _pullphase(pullop):
    """fetch phase data from the remote and apply it locally"""
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1085
1087
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the raw listkeys 'phases' mapping from the remote.
    No-op when the 'phases' step was already handled."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1120
1122
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1132
1134
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            # only open a transaction if there is actually data to merge
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    # payload is base85-encoded binary marker data
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1156
1158
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1163
1165
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1171
1173
def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded in the step -> function mapping and
    its name added to the ordered step list: appended when ``idx`` is None,
    inserted at position ``idx`` otherwise. Decoration order is registration
    order, so it may matter.

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def register(func):
        # a step name must be unique; registering twice is a programming error
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1190
1192
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        # any HG2x capability advertised by the client selects bundle2
        usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            # bundle10 has no extensible parts: extra arguments are errors
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # each registered part generator decides for itself whether to add a part
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1237
1239
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        if not cgversions:  # 3.1 and 3.2 ship with an empty value
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # negotiate the highest changegroup version both sides support
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            # only annotate the part when a version was negotiated
            part.addparam('version', version)
1265
1267
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    for namespace in kwargs.get('listkeys', ()):
        # one 'listkeys' part per requested namespace
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1276
1278
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # only markers relevant to the ancestors of the requested heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    buildobsmarkerspart(bundler, markers)
1288
1290
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        # nothing to send: no new heads are part of this bundle
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1331
1333
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.

    Raises PushRaced when the local heads no longer match what the remote
    observed, unless the remote forced the operation.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if (their_heads != ['force'] and their_heads != heads
            and their_heads != ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1345
1347
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised.

    ``cg`` is either a changegroup (bundle1) or a bundle2 stream; the two
    cases are distinguished below by the presence of a ``params`` attribute.
    ``heads`` is the remote's view of our heads, checked against the current
    heads to detect races.  ``source`` and ``url`` are recorded in hook
    arguments.
    """
    r = 0
    # need a transaction when processing a bundle2 stream
    wlock = lock = tr = None
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        # pushes over HTTP(S) always capture output so it can be relayed
        # back to the client in the bundle2 reply
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 path: needs write lock + transaction, and returns the
            # reply bundle rather than an integer status
            r = None
            try:
                wlock = repo.wlock()
                lock = repo.lock()
                tr = repo.transaction(source)
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                op = bundle2.bundleoperation(repo, lambda: tr,
                                             captureoutput=captureoutput)
                try:
                    r = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # always expose the reply bundle, even on failure, and
                    # start buffering ui output so it can be attached to it
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                tr.close()
            except BaseException, exc:
                # mark the exception so upper layers know it happened during
                # bundle2 processing, and salvage any reply output parts
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # bundle1 path: plain changegroup application under the store lock
            lock = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        lockmod.release(tr, lock, wlock)
        if recordout is not None:
            # flush buffered ui output into the reply (or salvaged parts)
            recordout(repo.ui.popbuffer())
    return r
1403
1405
1404 # This is it's own function so extensions can override it.
1406 # This is it's own function so extensions can override it.
1405 def _walkstreamfiles(repo):
1407 def _walkstreamfiles(repo):
1406 return repo.store.walk()
1408 return repo.store.walk()
1407
1409
def generatestreamclone(repo):
    """Emit content for a streaming clone.

    This is a generator of raw chunks that constitute a streaming clone.

    The stream begins with a line of 2 space-delimited integers containing the
    number of entries and total bytes size.

    Next, are N entries for each file being transferred. Each file entry starts
    as a line with the file name and integer size delimited by a null byte.
    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            # empty files are omitted from the stream entirely
            if size:
                entries.append((name, size))
                total_bytes += size
    finally:
        lock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    # header line: "<entry count> <total bytes>"
    yield '%d %d\n' % (len(entries), total_bytes)

    sopener = repo.svfs
    oldaudit = sopener.mustaudit
    debugflag = repo.ui.debugflag
    # disable path auditing while streaming; the names came from the store
    # walk above.  Restored in the finally below.
    sopener.mustaudit = False

    try:
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            # small files are read in a single gulp, larger ones chunked
            if size <= 65536:
                fp = sopener(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                for chunk in util.filechunkiter(sopener(name), limit=size):
                    yield chunk
    finally:
        sopener.mustaudit = oldaudit
1468
1470
def consumestreamclone(repo, fp):
    """Apply the contents from a streaming clone file.

    This takes the output from "streamout" and applies it to the specified
    repository.

    Like "streamout," the status line added by the wire protocol is not handled
    by this function.

    ``fp`` is a file-like object positioned at the stream header line.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        # header line: "<total files> <total bytes>"
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # per-file header line: "<name>\0<size>"
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            # guard against a zero/negative wall-clock delta so the
            # transfer-rate division below cannot divide by zero
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now