##// END OF EJS Templates
push: make discovery extensible...
Pierre-Yves David -
r22018:ddb56e7e default
parent child Browse files
Show More
@@ -1,848 +1,881
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Read a bundle header from ``fh`` and return the matching unbundler.

    ``fname`` is used only for error messages (defaults to "stream" when
    empty); when ``vfs`` is given, ``fname`` is joined to the vfs root for
    reporting. Raises ``util.Abort`` for non-Mercurial data or an unknown
    bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # Headerless changegroup coming from a stream: push the peeked
            # bytes back and treat it as an uncompressed HG10 bundle.
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression algorithm follows the header for HG10 bundles
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """State container for a single push operation.

    Carries push-related state and a few common helpers. Create a fresh
    instance at the beginning of each push and discard it afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no push target: every common head stays relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in common]
        # and
        # * commonheads parents on missing
        parentrevs = unfi.set('%ln and parents(roots(%ln))',
                              self.outgoing.commonheads,
                              self.outgoing.missing)
        cheads.extend(ctx.node() for ctx in parentrevs)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.ret:
            return self.futureheads
        return self.fallbackheads
125
125
126 def push(repo, remote, force=False, revs=None, newbranch=False):
126 def push(repo, remote, force=False, revs=None, newbranch=False):
127 '''Push outgoing changesets (limited by revs) from a local
127 '''Push outgoing changesets (limited by revs) from a local
128 repository to remote. Return an integer:
128 repository to remote. Return an integer:
129 - None means nothing to push
129 - None means nothing to push
130 - 0 means HTTP error
130 - 0 means HTTP error
131 - 1 means we pushed and remote head count is unchanged *or*
131 - 1 means we pushed and remote head count is unchanged *or*
132 we have outgoing changesets but refused to push
132 we have outgoing changesets but refused to push
133 - other values as described by addchangegroup()
133 - other values as described by addchangegroup()
134 '''
134 '''
135 pushop = pushoperation(repo, remote, force, revs, newbranch)
135 pushop = pushoperation(repo, remote, force, revs, newbranch)
136 if pushop.remote.local():
136 if pushop.remote.local():
137 missing = (set(pushop.repo.requirements)
137 missing = (set(pushop.repo.requirements)
138 - pushop.remote.local().supported)
138 - pushop.remote.local().supported)
139 if missing:
139 if missing:
140 msg = _("required features are not"
140 msg = _("required features are not"
141 " supported in the destination:"
141 " supported in the destination:"
142 " %s") % (', '.join(sorted(missing)))
142 " %s") % (', '.join(sorted(missing)))
143 raise util.Abort(msg)
143 raise util.Abort(msg)
144
144
145 # there are two ways to push to remote repo:
145 # there are two ways to push to remote repo:
146 #
146 #
147 # addchangegroup assumes local user can lock remote
147 # addchangegroup assumes local user can lock remote
148 # repo (local filesystem, old ssh servers).
148 # repo (local filesystem, old ssh servers).
149 #
149 #
150 # unbundle assumes local user cannot lock remote repo (new ssh
150 # unbundle assumes local user cannot lock remote repo (new ssh
151 # servers, http servers).
151 # servers, http servers).
152
152
153 if not pushop.remote.canpush():
153 if not pushop.remote.canpush():
154 raise util.Abort(_("destination does not support push"))
154 raise util.Abort(_("destination does not support push"))
155 # get local lock as we might write phase data
155 # get local lock as we might write phase data
156 locallock = None
156 locallock = None
157 try:
157 try:
158 locallock = pushop.repo.lock()
158 locallock = pushop.repo.lock()
159 pushop.locallocked = True
159 pushop.locallocked = True
160 except IOError, err:
160 except IOError, err:
161 pushop.locallocked = False
161 pushop.locallocked = False
162 if err.errno != errno.EACCES:
162 if err.errno != errno.EACCES:
163 raise
163 raise
164 # source repo cannot be locked.
164 # source repo cannot be locked.
165 # We do not abort the push, but just disable the local phase
165 # We do not abort the push, but just disable the local phase
166 # synchronisation.
166 # synchronisation.
167 msg = 'cannot lock source repository: %s\n' % err
167 msg = 'cannot lock source repository: %s\n' % err
168 pushop.ui.debug(msg)
168 pushop.ui.debug(msg)
169 try:
169 try:
170 pushop.repo.checkpush(pushop)
170 pushop.repo.checkpush(pushop)
171 lock = None
171 lock = None
172 unbundle = pushop.remote.capable('unbundle')
172 unbundle = pushop.remote.capable('unbundle')
173 if not unbundle:
173 if not unbundle:
174 lock = pushop.remote.lock()
174 lock = pushop.remote.lock()
175 try:
175 try:
176 _pushdiscovery(pushop)
176 _pushdiscovery(pushop)
177 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
177 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
178 False)
178 False)
179 and pushop.remote.capable('bundle2-exp')):
179 and pushop.remote.capable('bundle2-exp')):
180 _pushbundle2(pushop)
180 _pushbundle2(pushop)
181 _pushchangeset(pushop)
181 _pushchangeset(pushop)
182 _pushsyncphase(pushop)
182 _pushsyncphase(pushop)
183 _pushobsolete(pushop)
183 _pushobsolete(pushop)
184 finally:
184 finally:
185 if lock is not None:
185 if lock is not None:
186 lock.release()
186 lock.release()
187 finally:
187 finally:
188 if locallock is not None:
188 if locallock is not None:
189 locallock.release()
189 locallock.release()
190
190
191 _pushbookmark(pushop)
191 _pushbookmark(pushop)
192 return pushop.ret
192 return pushop.ret
193
193
194 # list of steps to perform discovery before push
195 pushdiscoveryorder = []
196
197 # Mapping between step name and function
198 #
199 # This exists to help extensions wrap steps if necessary
200 pushdiscoverymapping = {}
201
202 def pushdiscovery(stepname):
203 """decorator for function performing discovery before push
204
205 The function is added to the step -> function mapping and appended to the
206 list of steps. Beware that decorated function will be added in order (this
207 may matter).
208
209 You can only use this decorator for a new step, if you want to wrap a step
210 from an extension, change the pushdiscovery dictionary directly."""
211 def dec(func):
212 assert stepname not in pushdiscoverymapping
213 pushdiscoverymapping[stepname] = func
214 pushdiscoveryorder.append(stepname)
215 return func
216 return dec
217
218
219
194 def _pushdiscovery(pushop):
220 def _pushdiscovery(pushop):
195 # discovery
221 """Run all discovery steps"""
222 for stepname in pushdiscoveryorder:
223 step = pushdiscoverymapping[stepname]
224 step(pushop)
225
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    # figure out what is common with the remote and what it is missing
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # compute the outgoing set (restricted to pushop.revs when given)
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
239
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before pushing

    Returns False when there is nothing to push, True otherwise. Aborts
    when a non-forced push would publish obsolete/troubled changesets or
    create unexpected new remote heads."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst) % (ctx.troubles()[0], ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads, pushop.newbranch,
                             bool(pushop.incoming), newbm)
    return True
245
278
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return dec
302
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    # NOTE: the original contained this ``stepsdone.add`` twice; ``set.add``
    # is idempotent so dropping the duplicate does not change behavior.
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply
296
329
297
330
298 def _pushbundle2(pushop):
331 def _pushbundle2(pushop):
299 """push data to the remote using bundle2
332 """push data to the remote using bundle2
300
333
301 The only currently supported type of data is changegroup but this will
334 The only currently supported type of data is changegroup but this will
302 evolve in the future."""
335 evolve in the future."""
303 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
336 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
304 # create reply capability
337 # create reply capability
305 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
338 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
306 bundler.newpart('b2x:replycaps', data=capsblob)
339 bundler.newpart('b2x:replycaps', data=capsblob)
307 replyhandlers = []
340 replyhandlers = []
308 for partgenname in b2partsgenorder:
341 for partgenname in b2partsgenorder:
309 partgen = b2partsgenmapping[partgenname]
342 partgen = b2partsgenmapping[partgenname]
310 ret = partgen(pushop, bundler)
343 ret = partgen(pushop, bundler)
311 if callable(ret):
344 if callable(ret):
312 replyhandlers.append(ret)
345 replyhandlers.append(ret)
313 # do not push if nothing to push
346 # do not push if nothing to push
314 if bundler.nbparts <= 1:
347 if bundler.nbparts <= 1:
315 return
348 return
316 stream = util.chunkbuffer(bundler.getchunks())
349 stream = util.chunkbuffer(bundler.getchunks())
317 try:
350 try:
318 reply = pushop.remote.unbundle(stream, ['force'], 'push')
351 reply = pushop.remote.unbundle(stream, ['force'], 'push')
319 except error.BundleValueError, exc:
352 except error.BundleValueError, exc:
320 raise util.Abort('missing support for %s' % exc)
353 raise util.Abort('missing support for %s' % exc)
321 try:
354 try:
322 op = bundle2.processbundle(pushop.repo, reply)
355 op = bundle2.processbundle(pushop.repo, reply)
323 except error.BundleValueError, exc:
356 except error.BundleValueError, exc:
324 raise util.Abort('missing support for %s' % exc)
357 raise util.Abort('missing support for %s' % exc)
325 for rephand in replyhandlers:
358 for rephand in replyhandlers:
326 rephand(op)
359 rephand(op)
327
360
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        # already handled (e.g. through bundle2)
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    fastpathok = (pushop.revs is None
                  and not outgoing.excluded
                  and not pushop.repo.changelog.filteredrevs)
    if fastpathok:
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo, outgoing, bundler, 'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
408
376 def _pushsyncphase(pushop):
409 def _pushsyncphase(pushop):
377 """synchronise phase information locally and remotely"""
410 """synchronise phase information locally and remotely"""
378 unfi = pushop.repo.unfiltered()
411 unfi = pushop.repo.unfiltered()
379 cheads = pushop.commonheads
412 cheads = pushop.commonheads
380 # even when we don't push, exchanging phase data is useful
413 # even when we don't push, exchanging phase data is useful
381 remotephases = pushop.remote.listkeys('phases')
414 remotephases = pushop.remote.listkeys('phases')
382 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
415 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
383 and remotephases # server supports phases
416 and remotephases # server supports phases
384 and pushop.ret is None # nothing was pushed
417 and pushop.ret is None # nothing was pushed
385 and remotephases.get('publishing', False)):
418 and remotephases.get('publishing', False)):
386 # When:
419 # When:
387 # - this is a subrepo push
420 # - this is a subrepo push
388 # - and remote support phase
421 # - and remote support phase
389 # - and no changeset was pushed
422 # - and no changeset was pushed
390 # - and remote is publishing
423 # - and remote is publishing
391 # We may be in issue 3871 case!
424 # We may be in issue 3871 case!
392 # We drop the possible phase synchronisation done by
425 # We drop the possible phase synchronisation done by
393 # courtesy to publish changesets possibly locally draft
426 # courtesy to publish changesets possibly locally draft
394 # on the remote.
427 # on the remote.
395 remotephases = {'publishing': 'True'}
428 remotephases = {'publishing': 'True'}
396 if not remotephases: # old server or public only reply from non-publishing
429 if not remotephases: # old server or public only reply from non-publishing
397 _localphasemove(pushop, cheads)
430 _localphasemove(pushop, cheads)
398 # don't push any phase data as there is nothing to push
431 # don't push any phase data as there is nothing to push
399 else:
432 else:
400 ana = phases.analyzeremotephases(pushop.repo, cheads,
433 ana = phases.analyzeremotephases(pushop.repo, cheads,
401 remotephases)
434 remotephases)
402 pheads, droots = ana
435 pheads, droots = ana
403 ### Apply remote phase on local
436 ### Apply remote phase on local
404 if remotephases.get('publishing', False):
437 if remotephases.get('publishing', False):
405 _localphasemove(pushop, cheads)
438 _localphasemove(pushop, cheads)
406 else: # publish = False
439 else: # publish = False
407 _localphasemove(pushop, pheads)
440 _localphasemove(pushop, pheads)
408 _localphasemove(pushop, cheads, phases.draft)
441 _localphasemove(pushop, cheads, phases.draft)
409 ### Apply local phase on remote
442 ### Apply local phase on remote
410
443
411 # Get the list of all revs draft on remote by public here.
444 # Get the list of all revs draft on remote by public here.
412 # XXX Beware that revset break if droots is not strictly
445 # XXX Beware that revset break if droots is not strictly
413 # XXX root we may want to ensure it is but it is costly
446 # XXX root we may want to ensure it is but it is costly
414 outdated = unfi.set('heads((%ln::%ln) and public())',
447 outdated = unfi.set('heads((%ln::%ln) and public())',
415 droots, cheads)
448 droots, cheads)
416
449
417 b2caps = bundle2.bundle2caps(pushop.remote)
450 b2caps = bundle2.bundle2caps(pushop.remote)
418 if 'b2x:pushkey' in b2caps:
451 if 'b2x:pushkey' in b2caps:
419 # server supports bundle2, let's do a batched push through it
452 # server supports bundle2, let's do a batched push through it
420 #
453 #
421 # This will eventually be unified with the changesets bundle2 push
454 # This will eventually be unified with the changesets bundle2 push
422 bundler = bundle2.bundle20(pushop.ui, b2caps)
455 bundler = bundle2.bundle20(pushop.ui, b2caps)
423 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
456 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
424 bundler.newpart('b2x:replycaps', data=capsblob)
457 bundler.newpart('b2x:replycaps', data=capsblob)
425 part2node = []
458 part2node = []
426 enc = pushkey.encode
459 enc = pushkey.encode
427 for newremotehead in outdated:
460 for newremotehead in outdated:
428 part = bundler.newpart('b2x:pushkey')
461 part = bundler.newpart('b2x:pushkey')
429 part.addparam('namespace', enc('phases'))
462 part.addparam('namespace', enc('phases'))
430 part.addparam('key', enc(newremotehead.hex()))
463 part.addparam('key', enc(newremotehead.hex()))
431 part.addparam('old', enc(str(phases.draft)))
464 part.addparam('old', enc(str(phases.draft)))
432 part.addparam('new', enc(str(phases.public)))
465 part.addparam('new', enc(str(phases.public)))
433 part2node.append((part.id, newremotehead))
466 part2node.append((part.id, newremotehead))
434 stream = util.chunkbuffer(bundler.getchunks())
467 stream = util.chunkbuffer(bundler.getchunks())
435 try:
468 try:
436 reply = pushop.remote.unbundle(stream, ['force'], 'push')
469 reply = pushop.remote.unbundle(stream, ['force'], 'push')
437 op = bundle2.processbundle(pushop.repo, reply)
470 op = bundle2.processbundle(pushop.repo, reply)
438 except error.BundleValueError, exc:
471 except error.BundleValueError, exc:
439 raise util.Abort('missing support for %s' % exc)
472 raise util.Abort('missing support for %s' % exc)
440 for partid, node in part2node:
473 for partid, node in part2node:
441 partrep = op.records.getreplies(partid)
474 partrep = op.records.getreplies(partid)
442 results = partrep['pushkey']
475 results = partrep['pushkey']
443 assert len(results) <= 1
476 assert len(results) <= 1
444 msg = None
477 msg = None
445 if not results:
478 if not results:
446 msg = _('server ignored update of %s to public!\n') % node
479 msg = _('server ignored update of %s to public!\n') % node
447 elif not int(results[0]['return']):
480 elif not int(results[0]['return']):
448 msg = _('updating %s to public failed!\n') % node
481 msg = _('updating %s to public failed!\n') % node
449 if msg is not None:
482 if msg is not None:
450 pushop.ui.warn(msg)
483 pushop.ui.warn(msg)
451
484
452 else:
485 else:
453 # fallback to independant pushkey command
486 # fallback to independant pushkey command
454 for newremotehead in outdated:
487 for newremotehead in outdated:
455 r = pushop.remote.pushkey('phases',
488 r = pushop.remote.pushkey('phases',
456 newremotehead.hex(),
489 newremotehead.hex(),
457 str(phases.draft),
490 str(phases.draft),
458 str(phases.public))
491 str(phases.public))
459 if not r:
492 if not r:
460 pushop.ui.warn(_('updating %s to public failed!\n')
493 pushop.ui.warn(_('updating %s to public failed!\n')
461 % newremotehead)
494 % newremotehead)
462
495
def _localphasemove(pushop, nodes, phase=phases.public):
    """Advance <nodes> to <phase> in the local (source) repository.

    When the local repo could not be locked, no phase is touched;
    instead the user is informed whenever the move would actually have
    changed something.
    """
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
        return
    # Repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    wouldmove = [n for n in nodes if phase < pushop.repo[n].phase()]
    if wouldmove:
        phasename = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasename)
476
509
def _pushobsolete(pushop):
    """Push local obsolescence markers to the remote, when supported."""
    ui = pushop.ui
    ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    # only push when the feature is enabled locally, we have markers and
    # the remote advertises the 'obsolete' pushkey namespace
    if not (obsolete._enabled and repo.obsstore
            and 'obsolete' in remote.listkeys('namespaces')):
        return
    # NOTE(review): this reads the *local* repo's pushkey data despite the
    # original variable name "remotedata".
    markers = repo.listkeys('obsolete')
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(markers, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', markers[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
493
526
def _pushbookmark(pushop):
    """Update bookmark position on remote.

    Only bookmarks that moved forward on our side (``advsrc``) are
    pushed; a bookmark pointing outside the pushed subset is skipped.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    # Use a set: membership is tested once per bookmark below and the
    # ancestor collection can be large (a list made each test O(n)).
    ancestors = set(repo.changelog.ancestors(revnums, inclusive=True))
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            # bookmark target is outside of the pushed subset: skip
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
513
546
class pulloperation(object):
    """An object representing a single pull operation.

    Its purpose is to carry pull-related state and very common
    operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is not None:
            # We pulled a specific subset: sync on this subset.
            return self.heads
        # We pulled everything possible: sync on everything common plus
        # the remote heads not already known to be common.
        known = set(self.common)
        subset = list(self.common)
        subset.extend(h for h in self.rheads if h not in known)
        return subset

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
580
613
def pull(repo, remote, heads=None, force=False):
    """Pull changes from ``remote`` into ``repo``.

    Runs discovery, then either a bundle2 exchange (when both sides
    support the experimental capability) or the legacy per-step pull
    (changegroup, phases, obsmarkers).

    Returns the changegroup result code (pull's return value).
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        # Ensure the lock is released even if transaction cleanup raises
        # (previously a failure in releasetransaction leaked the lock).
        try:
            pullop.releasetransaction()
        finally:
            lock.release()

    return pullop.cgresult
609
642
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle
    all discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(),
        pullop.remote,
        heads=pullop.heads,
        force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
620
653
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if 'b2x:listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    # let extensions add extra arguments before issuing the request
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        # "as" form works on py2.6+ and py3 (the comma form is py2-only)
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)
657
690
658 def _pullbundle2extraprepare(pullop, kwargs):
691 def _pullbundle2extraprepare(pullop, kwargs):
659 """hook function so that extensions can extend the getbundle call"""
692 """hook function so that extensions can extend the getbundle call"""
660 pass
693 pass
661
694
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't
    # open a transaction for nothing, which would break future useful
    # rollback calls.
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    remote = pullop.remote
    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 remote.url())
693
726
def _pullphase(pullop):
    """Fetch remote phase data and apply the resulting movements."""
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)
698
731
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if publishing or not remotephases:
        # Remote is old or publishing: all common changesets
        # should be seen as public.
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
    else:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
716
749
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    A pull transaction is created on demand (via pullop.gettransaction)
    only when markers are actually received. The transaction is returned
    to inform the calling code that a new transaction has been created
    (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    pullop.todosteps.remove('obsmarkers')
    if not obsolete._enabled:
        return None
    tr = None
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' in remoteobs:
        tr = pullop.gettransaction()
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
        pullop.repo.invalidatevolatilesets()
    return tr
738
771
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    return set(['HG2X', 'bundle2=' + urllib.quote(capsblob)])
745
778
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif bundlecaps is None or 'HG2X' not in bundlecaps:
        # Guard bundlecaps=None too: previously `'HG2X' not in None`
        # raised TypeError instead of the intended ValueError.
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    # let extensions add extra parts to the requested bundle
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
792
825
793 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
826 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
794 bundlecaps=None, **kwargs):
827 bundlecaps=None, **kwargs):
795 """hook function to let extensions add parts to the requested bundle"""
828 """hook function to let extensions add parts to the requested bundle"""
796 pass
829 pass
797
830
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = util.sha1(''.join(sorted(current))).digest()
    acceptable = (their_heads == ['force']
                  or their_heads == current
                  or their_heads == ['hashed', digest])
    if not acceptable:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
811
844
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: process under a transaction and fire the
            # experimental b2x hooks around its closing
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception as exc:
                # "as" form works on py2.6+ and py3 (comma form is py2-only);
                # tag the exception so callers know it happened inside bundle2
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now