push: wrap local phase movement in a transaction...
Pierre-Yves David
r22051:e894de23 default
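The change below applies Mercurial's usual transaction pattern to the local phase update performed during push: open a transaction, do the write, close it on success, and release it in a finally block so an interrupted update is rolled back. A minimal sketch of that pattern, using only calls visible in the diff (repo.transaction, tr.close, tr.release, phases.advanceboundary) and assuming this module's context where phases is already imported; the standalone helper name is illustrative, not part of the patch:

    def _phasemoveintransaction(repo, phase, nodes):
        # open a transaction; the diff below names it 'push-phase-sync'
        tr = repo.transaction('push-phase-sync')
        try:
            # the phase write happens inside the transaction
            phases.advanceboundary(repo, phase, nodes)
            tr.close()    # commit the transaction on success
        finally:
            tr.release()  # roll back if tr.close() was never reached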
@@ -1,965 +1,970 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno, urllib
import util, scmutil, changegroup, base85, error
import discovery, phases, obsolete, bookmarks, bundle2, pushkey

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))


class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.ret:
            return self.futureheads
        else:
            return self.fallbackheads

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for a function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order (this
    may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec



def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure cases of the changeset push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    pushop.outobsmarkers = pushop.repo.obsstore

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are going to push and there is at least one
            # obsolete or unstable changeset in missing, then at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator for a function generating a bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return dec

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from the server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'b2x:pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.ret:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fall back to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
-        phases.advanceboundary(pushop.repo, phase, nodes)
+        tr = pushop.repo.transaction('push-phase-sync')
+        try:
+            phases.advanceboundary(pushop.repo, phase, nodes)
+            tr.close()
+        finally:
+            tr.release()
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult

def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if 'b2x:listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the opening of the transaction as late as possible so we
    # don't open a transaction for nothing, or we break future useful
    # rollback calls
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get phases data from the remote
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)

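# Illustrative sketch (not part of the original module): the 'phases'
# listkeys payload consumed above usually looks like one of
#
#     {'publishing': 'True'}      # publishing server: everything common
#                                 # becomes public
#     {'<hex node>': '1'}         # draft root advertised by a
#                                 # non-publishing server
#
# with the exact keys depending on the remote repository.
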
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

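# Illustrative note (not part of the original module): the 'obsolete'
# listkeys namespace read above maps keys 'dump0', 'dump1', ... to
# base85-encoded marker dumps; each decoded payload is merged into the
# local obsstore inside the pull transaction.
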
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG2X'])
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

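# Illustrative sketch (not part of the original module): the set returned by
# caps20to10 looks roughly like
#
#     set(['HG2X', 'bundle2=HG2X%0Achangegroup%0A...'])
#
# where the URL-quoted blob is only a stand-in; its exact contents depend on
# repo.bundle2caps.
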
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG2X depending on the bundlecaps
    passed. For now, the bundle can contain only a changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getbundle, which only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when
    we have a clearer idea of the API we want for querying different data.

    The implementation is at a very early stage and will get massive rework
    when the bundle API is refined.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif 'HG2X' not in bundlecaps:
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())

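# Illustrative sketch (not part of the original module): a caller that wants
# a bundle2 stream carrying the changegroup plus the 'phases' pushkey
# namespace could do something like
#
#     caps = caps20to10(repo)
#     stream = getbundle(repo, 'pull', heads=heads, common=common,
#                        bundlecaps=caps, listkeys=['phases'])
#
# With an HG10-only peer (no 'HG2X' in bundlecaps) the same call degrades to
# a plain changegroup and any extra keyword argument raises ValueError.
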
def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
    pass

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

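# Illustrative note (not part of the original module): callers pass either
# the exact head list they observed, the literal ['force'] to skip the
# check, or the compact ['hashed', <sha1 of the sorted heads>] form used
# with the wireproto 'unbundlehash' capability.
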
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
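
# Illustrative sketch (not part of the original module): applying a bundle
# file to a local repository while bypassing the push-race check; the file
# name 'changes.hg' is a made-up placeholder.
#
#     fh = open('changes.hg', 'rb')
#     gen = readbundle(repo.ui, fh, 'changes.hg')
#     unbundle(repo, gen, ['force'], 'unbundle', 'bundle:changes.hg')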