##// END OF EJS Templates
push: use stepsdone for obsmarkers push...
Pierre-Yves David -
r22036:f1528ef1 default
parent child Browse files
Show More
@@ -1,962 +1,965 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the header of a bundle stream and return the matching unbundler.

    ``fh`` is the open stream, ``fname`` a name used in error messages
    (``"stream"`` when empty).  When ``vfs`` is given and a filename is
    present, the name is resolved through the vfs.  Raises ``util.Abort``
    for non-Mercurial data or an unknown bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        # a headerless changegroup arriving on stdin: re-attach a HG10
        # header and treat the payload as uncompressed
        if header.startswith('\0') and not header.startswith('HG'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression marker follows the version for HG10 bundles
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """State holder for a single push operation.

    It carries push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # a truthy self.ret means the changegroup was accepted, so the
        # pushed heads are now common; otherwise fall back to the
        # pre-push common heads
        if self.ret:
            return self.futureheads
        else:
            return self.fallbackheads
131
131
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    # refuse to push to a local peer that lacks a requirement of this repo
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repo ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            # bundle2 path is experimental and opt-in on both sides;
            # steps it completes are recorded in pushop.stepsdone so the
            # legacy steps below can skip them
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    # bookmarks are exchanged outside of the locks
    _pushbookmark(pushop)
    return pushop.ret
199
199
# names of the discovery steps to run before a push, in execution order
pushdiscoveryorder = []

# step name -> discovery function
#
# Kept as a module-level mapping so extensions can wrap individual steps.
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded under ``stepname`` in
    ``pushdiscoverymapping`` and ``stepname`` is appended to
    ``pushdiscoveryorder`` — so decoration order is execution order.

    Only use this for brand new steps; to wrap an existing step from an
    extension, mutate the ``pushdiscoverymapping`` dictionary directly.
    """
    def register(func):
        # a step name may be registered only once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
223
223
224
224
225
225
def _pushdiscovery(pushop):
    """Invoke every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
231
231
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
245
245
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a non-publishing remote only public changesets can become
    # outdated draft heads, hence the extra revset condition
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phase moves to send if the changeset push succeeds / fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
280
280
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """select the obsolescence markers to push

    Currently selects the whole local obsstore; finer-grained selection
    presumably happens later in the push pipeline — TODO confirm."""
    pushop.outobsmarkers = pushop.repo.obsstore
284
284
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push.  Unless ``--force`` was
    given, aborts on obsolete/troubled outgoing heads and runs the
    standard new-head checks.
    """
    unfi = pushop.repo.unfiltered()
    outgoing = pushop.outgoing
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # a falsy repo.obsstore means no obsolete markers at all,
        # so the per-head inspection can be skipped entirely
        if unfi.obsstore:
            # these messages are bound here for 80-char-limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If at least one obsolete or unstable changeset is in
            # missing, at least one of the missing heads will be
            # obsolete or unstable, so checking heads only is enough.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                if ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0], ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
323
323
# names of the part generators for an outgoing bundle2, in execution order
b2partsgenorder = []

# step name -> part-generating function
#
# Kept as a module-level mapping so extensions can wrap individual steps.
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator registering a bundle2 part generator

    The decorated function is recorded under ``stepname`` in
    ``b2partsgenmapping`` and ``stepname`` is appended to
    ``b2partsgenorder`` — so decoration order is execution order.

    Only use this for brand new steps; to wrap an existing step from an
    extension, mutate the ``b2partsgenmapping`` dictionary directly.
    """
    def register(func):
        # a step name may be registered only once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
347
347
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    # fix: the step was recorded twice in a row; mark it once only
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply
374
374
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    enc = pushkey.encode
    part2node = []
    # one pushkey part per head that must move to public
    for head in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(head.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, head))
    def handlereply(op):
        # report any head the server did not move to public
        for partid, node in part2node:
            results = op.records.getreplies(partid)['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
406
406
407 def _pushbundle2(pushop):
407 def _pushbundle2(pushop):
408 """push data to the remote using bundle2
408 """push data to the remote using bundle2
409
409
410 The only currently supported type of data is changegroup but this will
410 The only currently supported type of data is changegroup but this will
411 evolve in the future."""
411 evolve in the future."""
412 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
412 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
413 # create reply capability
413 # create reply capability
414 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
414 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
415 bundler.newpart('b2x:replycaps', data=capsblob)
415 bundler.newpart('b2x:replycaps', data=capsblob)
416 replyhandlers = []
416 replyhandlers = []
417 for partgenname in b2partsgenorder:
417 for partgenname in b2partsgenorder:
418 partgen = b2partsgenmapping[partgenname]
418 partgen = b2partsgenmapping[partgenname]
419 ret = partgen(pushop, bundler)
419 ret = partgen(pushop, bundler)
420 if callable(ret):
420 if callable(ret):
421 replyhandlers.append(ret)
421 replyhandlers.append(ret)
422 # do not push if nothing to push
422 # do not push if nothing to push
423 if bundler.nbparts <= 1:
423 if bundler.nbparts <= 1:
424 return
424 return
425 stream = util.chunkbuffer(bundler.getchunks())
425 stream = util.chunkbuffer(bundler.getchunks())
426 try:
426 try:
427 reply = pushop.remote.unbundle(stream, ['force'], 'push')
427 reply = pushop.remote.unbundle(stream, ['force'], 'push')
428 except error.BundleValueError, exc:
428 except error.BundleValueError, exc:
429 raise util.Abort('missing support for %s' % exc)
429 raise util.Abort('missing support for %s' % exc)
430 try:
430 try:
431 op = bundle2.processbundle(pushop.repo, reply)
431 op = bundle2.processbundle(pushop.repo, reply)
432 except error.BundleValueError, exc:
432 except error.BundleValueError, exc:
433 raise util.Abort('missing support for %s' % exc)
433 raise util.Abort('missing support for %s' % exc)
434 for rephand in replyhandlers:
434 for rephand in replyhandlers:
435 rephand(op)
435 rephand(op)
436
436
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    # skip if a bundle2 part already pushed the changesets
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
484
484
485 def _pushsyncphase(pushop):
485 def _pushsyncphase(pushop):
486 """synchronise phase information locally and remotely"""
486 """synchronise phase information locally and remotely"""
487 cheads = pushop.commonheads
487 cheads = pushop.commonheads
488 # even when we don't push, exchanging phase data is useful
488 # even when we don't push, exchanging phase data is useful
489 remotephases = pushop.remote.listkeys('phases')
489 remotephases = pushop.remote.listkeys('phases')
490 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
490 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
491 and remotephases # server supports phases
491 and remotephases # server supports phases
492 and pushop.ret is None # nothing was pushed
492 and pushop.ret is None # nothing was pushed
493 and remotephases.get('publishing', False)):
493 and remotephases.get('publishing', False)):
494 # When:
494 # When:
495 # - this is a subrepo push
495 # - this is a subrepo push
496 # - and remote support phase
496 # - and remote support phase
497 # - and no changeset was pushed
497 # - and no changeset was pushed
498 # - and remote is publishing
498 # - and remote is publishing
499 # We may be in issue 3871 case!
499 # We may be in issue 3871 case!
500 # We drop the possible phase synchronisation done by
500 # We drop the possible phase synchronisation done by
501 # courtesy to publish changesets possibly locally draft
501 # courtesy to publish changesets possibly locally draft
502 # on the remote.
502 # on the remote.
503 remotephases = {'publishing': 'True'}
503 remotephases = {'publishing': 'True'}
504 if not remotephases: # old server or public only reply from non-publishing
504 if not remotephases: # old server or public only reply from non-publishing
505 _localphasemove(pushop, cheads)
505 _localphasemove(pushop, cheads)
506 # don't push any phase data as there is nothing to push
506 # don't push any phase data as there is nothing to push
507 else:
507 else:
508 ana = phases.analyzeremotephases(pushop.repo, cheads,
508 ana = phases.analyzeremotephases(pushop.repo, cheads,
509 remotephases)
509 remotephases)
510 pheads, droots = ana
510 pheads, droots = ana
511 ### Apply remote phase on local
511 ### Apply remote phase on local
512 if remotephases.get('publishing', False):
512 if remotephases.get('publishing', False):
513 _localphasemove(pushop, cheads)
513 _localphasemove(pushop, cheads)
514 else: # publish = False
514 else: # publish = False
515 _localphasemove(pushop, pheads)
515 _localphasemove(pushop, pheads)
516 _localphasemove(pushop, cheads, phases.draft)
516 _localphasemove(pushop, cheads, phases.draft)
517 ### Apply local phase on remote
517 ### Apply local phase on remote
518
518
519 if pushop.ret:
519 if pushop.ret:
520 if 'phases' in pushop.stepsdone:
520 if 'phases' in pushop.stepsdone:
521 # phases already pushed though bundle2
521 # phases already pushed though bundle2
522 return
522 return
523 outdated = pushop.outdatedphases
523 outdated = pushop.outdatedphases
524 else:
524 else:
525 outdated = pushop.fallbackoutdatedphases
525 outdated = pushop.fallbackoutdatedphases
526
526
527 pushop.stepsdone.add('phases')
527 pushop.stepsdone.add('phases')
528
528
529 # filter heads already turned public by the push
529 # filter heads already turned public by the push
530 outdated = [c for c in outdated if c.node() not in pheads]
530 outdated = [c for c in outdated if c.node() not in pheads]
531 b2caps = bundle2.bundle2caps(pushop.remote)
531 b2caps = bundle2.bundle2caps(pushop.remote)
532 if 'b2x:pushkey' in b2caps:
532 if 'b2x:pushkey' in b2caps:
533 # server supports bundle2, let's do a batched push through it
533 # server supports bundle2, let's do a batched push through it
534 #
534 #
535 # This will eventually be unified with the changesets bundle2 push
535 # This will eventually be unified with the changesets bundle2 push
536 bundler = bundle2.bundle20(pushop.ui, b2caps)
536 bundler = bundle2.bundle20(pushop.ui, b2caps)
537 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
537 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
538 bundler.newpart('b2x:replycaps', data=capsblob)
538 bundler.newpart('b2x:replycaps', data=capsblob)
539 part2node = []
539 part2node = []
540 enc = pushkey.encode
540 enc = pushkey.encode
541 for newremotehead in outdated:
541 for newremotehead in outdated:
542 part = bundler.newpart('b2x:pushkey')
542 part = bundler.newpart('b2x:pushkey')
543 part.addparam('namespace', enc('phases'))
543 part.addparam('namespace', enc('phases'))
544 part.addparam('key', enc(newremotehead.hex()))
544 part.addparam('key', enc(newremotehead.hex()))
545 part.addparam('old', enc(str(phases.draft)))
545 part.addparam('old', enc(str(phases.draft)))
546 part.addparam('new', enc(str(phases.public)))
546 part.addparam('new', enc(str(phases.public)))
547 part2node.append((part.id, newremotehead))
547 part2node.append((part.id, newremotehead))
548 stream = util.chunkbuffer(bundler.getchunks())
548 stream = util.chunkbuffer(bundler.getchunks())
549 try:
549 try:
550 reply = pushop.remote.unbundle(stream, ['force'], 'push')
550 reply = pushop.remote.unbundle(stream, ['force'], 'push')
551 op = bundle2.processbundle(pushop.repo, reply)
551 op = bundle2.processbundle(pushop.repo, reply)
552 except error.BundleValueError, exc:
552 except error.BundleValueError, exc:
553 raise util.Abort('missing support for %s' % exc)
553 raise util.Abort('missing support for %s' % exc)
554 for partid, node in part2node:
554 for partid, node in part2node:
555 partrep = op.records.getreplies(partid)
555 partrep = op.records.getreplies(partid)
556 results = partrep['pushkey']
556 results = partrep['pushkey']
557 assert len(results) <= 1
557 assert len(results) <= 1
558 msg = None
558 msg = None
559 if not results:
559 if not results:
560 msg = _('server ignored update of %s to public!\n') % node
560 msg = _('server ignored update of %s to public!\n') % node
561 elif not int(results[0]['return']):
561 elif not int(results[0]['return']):
562 msg = _('updating %s to public failed!\n') % node
562 msg = _('updating %s to public failed!\n') % node
563 if msg is not None:
563 if msg is not None:
564 pushop.ui.warn(msg)
564 pushop.ui.warn(msg)
565
565
566 else:
566 else:
567 # fallback to independant pushkey command
567 # fallback to independant pushkey command
568 for newremotehead in outdated:
568 for newremotehead in outdated:
569 r = pushop.remote.pushkey('phases',
569 r = pushop.remote.pushkey('phases',
570 newremotehead.hex(),
570 newremotehead.hex(),
571 str(phases.draft),
571 str(phases.draft),
572 str(phases.public))
572 str(phases.public))
573 if not r:
573 if not r:
574 pushop.ui.warn(_('updating %s to public failed!\n')
574 pushop.ui.warn(_('updating %s to public failed!\n')
575 % newremotehead)
575 % newremotehead)
576
576
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    If the source repo could not be locked, no phase is changed; instead
    the user is informed when a movement would have applied.
    """
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
590
590
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Honours ``pushop.stepsdone`` so the step is not replayed when another
    mechanism (e.g. bundle2) already transferred the markers.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
607
610
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Only bookmarks that advance on the local side (``advsrc``) and whose
    target changeset is an ancestor of the pushed set are updated.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            # bookmark points outside of the pushed subset; leave it alone
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
627
630
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
694
697
def pull(repo, remote, heads=None, force=False):
    """pull changesets (and related data) from ``remote`` into ``repo``

    Runs discovery, then the bundle2 path when both sides support it, then
    the remaining legacy steps (changegroup, phases, obsmarkers) that were
    not handled through bundle2. Returns the changegroup result code.
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # refuse to pull from a local repo we could not read back
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # run whatever steps bundle2 did not already take care of
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
723
726
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point.

    Fills in ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp
734
737
735 def _pullbundle2(pullop):
738 def _pullbundle2(pullop):
736 """pull data using bundle2
739 """pull data using bundle2
737
740
738 For now, the only supported data are changegroup."""
741 For now, the only supported data are changegroup."""
739 remotecaps = bundle2.bundle2caps(pullop.remote)
742 remotecaps = bundle2.bundle2caps(pullop.remote)
740 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
743 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
741 # pulling changegroup
744 # pulling changegroup
742 pullop.todosteps.remove('changegroup')
745 pullop.todosteps.remove('changegroup')
743
746
744 kwargs['common'] = pullop.common
747 kwargs['common'] = pullop.common
745 kwargs['heads'] = pullop.heads or pullop.rheads
748 kwargs['heads'] = pullop.heads or pullop.rheads
746 if 'b2x:listkeys' in remotecaps:
749 if 'b2x:listkeys' in remotecaps:
747 kwargs['listkeys'] = ['phase']
750 kwargs['listkeys'] = ['phase']
748 if not pullop.fetch:
751 if not pullop.fetch:
749 pullop.repo.ui.status(_("no changes found\n"))
752 pullop.repo.ui.status(_("no changes found\n"))
750 pullop.cgresult = 0
753 pullop.cgresult = 0
751 else:
754 else:
752 if pullop.heads is None and list(pullop.common) == [nullid]:
755 if pullop.heads is None and list(pullop.common) == [nullid]:
753 pullop.repo.ui.status(_("requesting all changes\n"))
756 pullop.repo.ui.status(_("requesting all changes\n"))
754 _pullbundle2extraprepare(pullop, kwargs)
757 _pullbundle2extraprepare(pullop, kwargs)
755 if kwargs.keys() == ['format']:
758 if kwargs.keys() == ['format']:
756 return # nothing to pull
759 return # nothing to pull
757 bundle = pullop.remote.getbundle('pull', **kwargs)
760 bundle = pullop.remote.getbundle('pull', **kwargs)
758 try:
761 try:
759 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
762 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
760 except error.BundleValueError, exc:
763 except error.BundleValueError, exc:
761 raise util.Abort('missing support for %s' % exc)
764 raise util.Abort('missing support for %s' % exc)
762
765
763 if pullop.fetch:
766 if pullop.fetch:
764 assert len(op.records['changegroup']) == 1
767 assert len(op.records['changegroup']) == 1
765 pullop.cgresult = op.records['changegroup'][0]['return']
768 pullop.cgresult = op.records['changegroup'][0]['return']
766
769
767 # processing phases change
770 # processing phases change
768 for namespace, value in op.records['listkeys']:
771 for namespace, value in op.records['listkeys']:
769 if namespace == 'phases':
772 if namespace == 'phases':
770 _pullapplyphases(pullop, value)
773 _pullapplyphases(pullop, value)
771
774
772 def _pullbundle2extraprepare(pullop, kwargs):
775 def _pullbundle2extraprepare(pullop, kwargs):
773 """hook function so that extensions can extend the getbundle call"""
776 """hook function so that extensions can extend the getbundle call"""
774 pass
777 pass
775
778
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Chooses the best wire command available on the remote (getbundle,
    changegroup or changegroupsubset) and records the result in
    ``pullop.cgresult``.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
807
810
def _pullphase(pullop):
    """pull phase information from the remote and apply it locally"""
    # Get remote phases data from remote
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)
812
815
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the remote's phases pushkey namespace content; an
    empty mapping means the remote is an old or all-publishing server.
    """
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
830
833
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            # only open a transaction when there is actually data to merge
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
852
855
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle

    The result is suitable as the ``bundlecaps`` argument of a getbundle
    request: the 'HG2X' marker plus the url-quoted local bundle2 capabilities.
    """
    caps = set(['HG2X'])
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps
859
862
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif bundlecaps is None or 'HG2X' not in bundlecaps:
        # only a bundle2 request may omit the changegroup; the explicit None
        # guard avoids a TypeError ('in' on None) and raises the intended
        # ValueError instead
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # plain bundle10 request: no extra argument is understood
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
906
909
907 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
910 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
908 bundlecaps=None, **kwargs):
911 bundlecaps=None, **kwargs):
909 """hook function to let extensions add parts to the requested bundle"""
912 """hook function to let extensions add parts to the requested bundle"""
910 pass
913 pass
911
914
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    current_hash = util.sha1(''.join(sorted(current))).digest()
    # The remote may send the literal string 'force' to skip the race
    # check, the exact head list it saw, or a hash of that head list.
    if their_heads == ['force']:
        return
    if their_heads == current:
        return
    if their_heads == ['hashed', current_hash]:
        return
    # None of the accepted forms matched: someone else committed, pushed
    # or unbundled while we were transferring data.
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
925
928
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    # Return value handed back to the caller (reply bundle for bundle2,
    # addchangegroup() result code for a plain changegroup).
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        # Abort with PushRaced if the repo heads changed since the client
        # built the bundle (unless heads is ['force']).
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # A 'params' attribute marks a bundle2 stream; process it
            # inside a single transaction so partial application rolls back.
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                # Flush pending changelog data so the pre-close hook can
                # inspect the not-yet-committed state (empty string when
                # nothing is pending).
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                # throw=True: a failing hook vetoes the transaction before
                # it is closed.
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                # Post-close notification hook; failures here no longer
                # veto the (already committed) transaction.
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # Tag the exception so upper layers can tell the failure
                # happened while processing a bundle2 stream.
                exc.duringunbundle2 = True
                raise
        else:
            # Legacy (bundle10) path: apply the changegroup directly.
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            # NOTE(review): release() presumably rolls back the transaction
            # when close() was never reached — confirm against transaction API.
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now