##// END OF EJS Templates
push: extract fallback heads computation into pushop...
Pierre-Yves David -
r22015:c478031d default
parent child Browse files
Show More
@@ -1,820 +1,827 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the stream ``fh`` and return the matching unbundler object.

    ``fname`` is used for error reporting (falling back to "stream") and,
    when ``vfs`` is provided, is resolved to a full path through it.
    Raises util.Abort for non-Mercurial data or an unknown bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A headerless stream beginning with a NUL byte is a raw,
        # uncompressed changegroup: push the sniffed bytes back onto the
        # stream and treat it as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # the two-byte compression marker follows unless already known
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """State holder for a single push operation.

    Carries push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been done through bundle2)
        self.stepsdone = set()
        # integer version of the push result:
        #   None  -> nothing to push
        #   0     -> HTTP error
        #   1     -> pushed and remote head count unchanged, *or* there
        #            were outgoing changesets but we refused to push
        #   other -> as described by addchangegroup()
        self.ret = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads stay relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads being revs with secret changesets filtered out).
        #
        # This can be expressed as:
        #   cheads = (missingheads and ::commonheads)
        #          + (commonheads and ::missingheads)
        #
        # While trying to push, discovery already computed:
        #   common  = (::commonheads)
        #   missing = ((commonheads::missingheads) - commonheads)
        #
        # So we can pick:
        # * the missingheads that are part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # * and the commonheads that are parents of missing changesets
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads
120
88 def push(repo, remote, force=False, revs=None, newbranch=False):
121 def push(repo, remote, force=False, revs=None, newbranch=False):
89 '''Push outgoing changesets (limited by revs) from a local
122 '''Push outgoing changesets (limited by revs) from a local
90 repository to remote. Return an integer:
123 repository to remote. Return an integer:
91 - None means nothing to push
124 - None means nothing to push
92 - 0 means HTTP error
125 - 0 means HTTP error
93 - 1 means we pushed and remote head count is unchanged *or*
126 - 1 means we pushed and remote head count is unchanged *or*
94 we have outgoing changesets but refused to push
127 we have outgoing changesets but refused to push
95 - other values as described by addchangegroup()
128 - other values as described by addchangegroup()
96 '''
129 '''
97 pushop = pushoperation(repo, remote, force, revs, newbranch)
130 pushop = pushoperation(repo, remote, force, revs, newbranch)
98 if pushop.remote.local():
131 if pushop.remote.local():
99 missing = (set(pushop.repo.requirements)
132 missing = (set(pushop.repo.requirements)
100 - pushop.remote.local().supported)
133 - pushop.remote.local().supported)
101 if missing:
134 if missing:
102 msg = _("required features are not"
135 msg = _("required features are not"
103 " supported in the destination:"
136 " supported in the destination:"
104 " %s") % (', '.join(sorted(missing)))
137 " %s") % (', '.join(sorted(missing)))
105 raise util.Abort(msg)
138 raise util.Abort(msg)
106
139
107 # there are two ways to push to remote repo:
140 # there are two ways to push to remote repo:
108 #
141 #
109 # addchangegroup assumes local user can lock remote
142 # addchangegroup assumes local user can lock remote
110 # repo (local filesystem, old ssh servers).
143 # repo (local filesystem, old ssh servers).
111 #
144 #
112 # unbundle assumes local user cannot lock remote repo (new ssh
145 # unbundle assumes local user cannot lock remote repo (new ssh
113 # servers, http servers).
146 # servers, http servers).
114
147
115 if not pushop.remote.canpush():
148 if not pushop.remote.canpush():
116 raise util.Abort(_("destination does not support push"))
149 raise util.Abort(_("destination does not support push"))
117 # get local lock as we might write phase data
150 # get local lock as we might write phase data
118 locallock = None
151 locallock = None
119 try:
152 try:
120 locallock = pushop.repo.lock()
153 locallock = pushop.repo.lock()
121 pushop.locallocked = True
154 pushop.locallocked = True
122 except IOError, err:
155 except IOError, err:
123 pushop.locallocked = False
156 pushop.locallocked = False
124 if err.errno != errno.EACCES:
157 if err.errno != errno.EACCES:
125 raise
158 raise
126 # source repo cannot be locked.
159 # source repo cannot be locked.
127 # We do not abort the push, but just disable the local phase
160 # We do not abort the push, but just disable the local phase
128 # synchronisation.
161 # synchronisation.
129 msg = 'cannot lock source repository: %s\n' % err
162 msg = 'cannot lock source repository: %s\n' % err
130 pushop.ui.debug(msg)
163 pushop.ui.debug(msg)
131 try:
164 try:
132 pushop.repo.checkpush(pushop)
165 pushop.repo.checkpush(pushop)
133 lock = None
166 lock = None
134 unbundle = pushop.remote.capable('unbundle')
167 unbundle = pushop.remote.capable('unbundle')
135 if not unbundle:
168 if not unbundle:
136 lock = pushop.remote.lock()
169 lock = pushop.remote.lock()
137 try:
170 try:
138 _pushdiscovery(pushop)
171 _pushdiscovery(pushop)
139 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
172 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
140 False)
173 False)
141 and pushop.remote.capable('bundle2-exp')):
174 and pushop.remote.capable('bundle2-exp')):
142 _pushbundle2(pushop)
175 _pushbundle2(pushop)
143 _pushchangeset(pushop)
176 _pushchangeset(pushop)
144 _pushcomputecommonheads(pushop)
177 _pushcomputecommonheads(pushop)
145 _pushsyncphase(pushop)
178 _pushsyncphase(pushop)
146 _pushobsolete(pushop)
179 _pushobsolete(pushop)
147 finally:
180 finally:
148 if lock is not None:
181 if lock is not None:
149 lock.release()
182 lock.release()
150 finally:
183 finally:
151 if locallock is not None:
184 if locallock is not None:
152 locallock.release()
185 locallock.release()
153
186
154 _pushbookmark(pushop)
187 _pushbookmark(pushop)
155 return pushop.ret
188 return pushop.ret
156
189
def _pushdiscovery(pushop):
    """Run common/outgoing discovery and record the result on pushop."""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
169
202
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push, True otherwise. When
    force is not set, aborts if the push would propagate obsolete or
    troubled changesets, or would create unexpected remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char limit's sake
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If there is at least one obsolete or unstable changeset in
            # missing, at least one of the missingheads will be obsolete
            # or unstable, so checking heads only is enough.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst) % (ctx.troubles()[0], ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
208
241
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    # fix: the step was registered twice in the original; once is enough
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    # Send known heads to the server for race detection.
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply

# list of functions that may decide to add parts to an outgoing bundle2
bundle2partsgenerators = [_pushb2ctx]
237
270
238 def _pushbundle2(pushop):
271 def _pushbundle2(pushop):
239 """push data to the remote using bundle2
272 """push data to the remote using bundle2
240
273
241 The only currently supported type of data is changegroup but this will
274 The only currently supported type of data is changegroup but this will
242 evolve in the future."""
275 evolve in the future."""
243 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
276 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
244 # create reply capability
277 # create reply capability
245 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
278 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
246 bundler.newpart('b2x:replycaps', data=capsblob)
279 bundler.newpart('b2x:replycaps', data=capsblob)
247 replyhandlers = []
280 replyhandlers = []
248 for partgen in bundle2partsgenerators:
281 for partgen in bundle2partsgenerators:
249 ret = partgen(pushop, bundler)
282 ret = partgen(pushop, bundler)
250 if callable(ret):
283 if callable(ret):
251 replyhandlers.append(ret)
284 replyhandlers.append(ret)
252 # do not push if nothing to push
285 # do not push if nothing to push
253 if bundler.nbparts <= 1:
286 if bundler.nbparts <= 1:
254 return
287 return
255 stream = util.chunkbuffer(bundler.getchunks())
288 stream = util.chunkbuffer(bundler.getchunks())
256 try:
289 try:
257 reply = pushop.remote.unbundle(stream, ['force'], 'push')
290 reply = pushop.remote.unbundle(stream, ['force'], 'push')
258 except error.BundleValueError, exc:
291 except error.BundleValueError, exc:
259 raise util.Abort('missing support for %s' % exc)
292 raise util.Abort('missing support for %s' % exc)
260 try:
293 try:
261 op = bundle2.processbundle(pushop.repo, reply)
294 op = bundle2.processbundle(pushop.repo, reply)
262 except error.BundleValueError, exc:
295 except error.BundleValueError, exc:
263 raise util.Abort('missing support for %s' % exc)
296 raise util.Abort('missing support for %s' % exc)
264 for rephand in replyhandlers:
297 for rephand in replyhandlers:
265 rephand(op)
298 rephand(op)
266
299
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # build a changegroup from local data
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything: use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # The local repo finds heads on the server, then finds out what
        # revs it must push. Once revs are transferred, if the server
        # finds it has different heads (someone else won a commit/push
        # race), it aborts.
        remoteheads = ['force'] if pushop.force else pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                  pushop.repo.url())
314
347
315 def _pushcomputecommonheads(pushop):
348 def _pushcomputecommonheads(pushop):
316 unfi = pushop.repo.unfiltered()
317 if pushop.ret:
349 if pushop.ret:
318 cheads = pushop.futureheads
350 cheads = pushop.futureheads
319 elif pushop.revs is None:
320 # All out push fails. synchronize all common
321 cheads = pushop.outgoing.commonheads
322 else:
351 else:
323 # I want cheads = heads(::missingheads and ::commonheads)
352 cheads = pushop.fallbackheads
324 # (missingheads is revs with secret changeset filtered out)
325 #
326 # This can be expressed as:
327 # cheads = ( (missingheads and ::commonheads)
328 # + (commonheads and ::missingheads))"
329 # )
330 #
331 # while trying to push we already computed the following:
332 # common = (::commonheads)
333 # missing = ((commonheads::missingheads) - commonheads)
334 #
335 # We can pick:
336 # * missingheads part of common (::commonheads)
337 common = set(pushop.outgoing.common)
338 nm = pushop.repo.changelog.nodemap
339 cheads = [node for node in pushop.revs if nm[node] in common]
340 # and
341 # * commonheads parents on missing
342 revset = unfi.set('%ln and parents(roots(%ln))',
343 pushop.outgoing.commonheads,
344 pushop.outgoing.missing)
345 cheads.extend(c.node() for c in revset)
346 pushop.commonheads = cheads
353 pushop.commonheads = cheads
347
354
348 def _pushsyncphase(pushop):
355 def _pushsyncphase(pushop):
349 """synchronise phase information locally and remotely"""
356 """synchronise phase information locally and remotely"""
350 unfi = pushop.repo.unfiltered()
357 unfi = pushop.repo.unfiltered()
351 cheads = pushop.commonheads
358 cheads = pushop.commonheads
352 # even when we don't push, exchanging phase data is useful
359 # even when we don't push, exchanging phase data is useful
353 remotephases = pushop.remote.listkeys('phases')
360 remotephases = pushop.remote.listkeys('phases')
354 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
361 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
355 and remotephases # server supports phases
362 and remotephases # server supports phases
356 and pushop.ret is None # nothing was pushed
363 and pushop.ret is None # nothing was pushed
357 and remotephases.get('publishing', False)):
364 and remotephases.get('publishing', False)):
358 # When:
365 # When:
359 # - this is a subrepo push
366 # - this is a subrepo push
360 # - and remote support phase
367 # - and remote support phase
361 # - and no changeset was pushed
368 # - and no changeset was pushed
362 # - and remote is publishing
369 # - and remote is publishing
363 # We may be in issue 3871 case!
370 # We may be in issue 3871 case!
364 # We drop the possible phase synchronisation done by
371 # We drop the possible phase synchronisation done by
365 # courtesy to publish changesets possibly locally draft
372 # courtesy to publish changesets possibly locally draft
366 # on the remote.
373 # on the remote.
367 remotephases = {'publishing': 'True'}
374 remotephases = {'publishing': 'True'}
368 if not remotephases: # old server or public only reply from non-publishing
375 if not remotephases: # old server or public only reply from non-publishing
369 _localphasemove(pushop, cheads)
376 _localphasemove(pushop, cheads)
370 # don't push any phase data as there is nothing to push
377 # don't push any phase data as there is nothing to push
371 else:
378 else:
372 ana = phases.analyzeremotephases(pushop.repo, cheads,
379 ana = phases.analyzeremotephases(pushop.repo, cheads,
373 remotephases)
380 remotephases)
374 pheads, droots = ana
381 pheads, droots = ana
375 ### Apply remote phase on local
382 ### Apply remote phase on local
376 if remotephases.get('publishing', False):
383 if remotephases.get('publishing', False):
377 _localphasemove(pushop, cheads)
384 _localphasemove(pushop, cheads)
378 else: # publish = False
385 else: # publish = False
379 _localphasemove(pushop, pheads)
386 _localphasemove(pushop, pheads)
380 _localphasemove(pushop, cheads, phases.draft)
387 _localphasemove(pushop, cheads, phases.draft)
381 ### Apply local phase on remote
388 ### Apply local phase on remote
382
389
383 # Get the list of all revs draft on remote by public here.
390 # Get the list of all revs draft on remote by public here.
384 # XXX Beware that revset break if droots is not strictly
391 # XXX Beware that revset break if droots is not strictly
385 # XXX root we may want to ensure it is but it is costly
392 # XXX root we may want to ensure it is but it is costly
386 outdated = unfi.set('heads((%ln::%ln) and public())',
393 outdated = unfi.set('heads((%ln::%ln) and public())',
387 droots, cheads)
394 droots, cheads)
388
395
389 b2caps = bundle2.bundle2caps(pushop.remote)
396 b2caps = bundle2.bundle2caps(pushop.remote)
390 if 'b2x:pushkey' in b2caps:
397 if 'b2x:pushkey' in b2caps:
391 # server supports bundle2, let's do a batched push through it
398 # server supports bundle2, let's do a batched push through it
392 #
399 #
393 # This will eventually be unified with the changesets bundle2 push
400 # This will eventually be unified with the changesets bundle2 push
394 bundler = bundle2.bundle20(pushop.ui, b2caps)
401 bundler = bundle2.bundle20(pushop.ui, b2caps)
395 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
402 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
396 bundler.newpart('b2x:replycaps', data=capsblob)
403 bundler.newpart('b2x:replycaps', data=capsblob)
397 part2node = []
404 part2node = []
398 enc = pushkey.encode
405 enc = pushkey.encode
399 for newremotehead in outdated:
406 for newremotehead in outdated:
400 part = bundler.newpart('b2x:pushkey')
407 part = bundler.newpart('b2x:pushkey')
401 part.addparam('namespace', enc('phases'))
408 part.addparam('namespace', enc('phases'))
402 part.addparam('key', enc(newremotehead.hex()))
409 part.addparam('key', enc(newremotehead.hex()))
403 part.addparam('old', enc(str(phases.draft)))
410 part.addparam('old', enc(str(phases.draft)))
404 part.addparam('new', enc(str(phases.public)))
411 part.addparam('new', enc(str(phases.public)))
405 part2node.append((part.id, newremotehead))
412 part2node.append((part.id, newremotehead))
406 stream = util.chunkbuffer(bundler.getchunks())
413 stream = util.chunkbuffer(bundler.getchunks())
407 try:
414 try:
408 reply = pushop.remote.unbundle(stream, ['force'], 'push')
415 reply = pushop.remote.unbundle(stream, ['force'], 'push')
409 op = bundle2.processbundle(pushop.repo, reply)
416 op = bundle2.processbundle(pushop.repo, reply)
410 except error.BundleValueError, exc:
417 except error.BundleValueError, exc:
411 raise util.Abort('missing support for %s' % exc)
418 raise util.Abort('missing support for %s' % exc)
412 for partid, node in part2node:
419 for partid, node in part2node:
413 partrep = op.records.getreplies(partid)
420 partrep = op.records.getreplies(partid)
414 results = partrep['pushkey']
421 results = partrep['pushkey']
415 assert len(results) <= 1
422 assert len(results) <= 1
416 msg = None
423 msg = None
417 if not results:
424 if not results:
418 msg = _('server ignored update of %s to public!\n') % node
425 msg = _('server ignored update of %s to public!\n') % node
419 elif not int(results[0]['return']):
426 elif not int(results[0]['return']):
420 msg = _('updating %s to public failed!\n') % node
427 msg = _('updating %s to public failed!\n') % node
421 if msg is not None:
428 if msg is not None:
422 pushop.ui.warn(msg)
429 pushop.ui.warn(msg)
423
430
424 else:
431 else:
425 # fallback to independant pushkey command
432 # fallback to independant pushkey command
426 for newremotehead in outdated:
433 for newremotehead in outdated:
427 r = pushop.remote.pushkey('phases',
434 r = pushop.remote.pushkey('phases',
428 newremotehead.hex(),
435 newremotehead.hex(),
429 str(phases.draft),
436 str(phases.draft),
430 str(phases.public))
437 str(phases.public))
431 if not r:
438 if not r:
432 pushop.ui.warn(_('updating %s to public failed!\n')
439 pushop.ui.warn(_('updating %s to public failed!\n')
433 % newremotehead)
440 % newremotehead)
434
441
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
448
455
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        # NOTE(review): despite the name, this reads the *local* repo's
        # obsolete pushkey namespace — confirm this is intended
        remotedata = repo.listkeys('obsolete')
        # reverse sort to ensure we end with dump0
        for key in sorted(remotedata, reverse=True):
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if any(not r for r in rslts):
            repo.ui.warn(_('failed to push some obsolete markers!\n'))
465
472
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = [repo.changelog.rev(node) for node in pushop.revs or []]
    ancestors = list(repo.changelog.ancestors(revnums, inclusive=True))
    comparison = bookmarks.compare(repo, repo._bookmarks,
                                   remote.listkeys('bookmarks'), srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comparison

    # only bookmarks the local side advanced are pushed
    for b, scid, dcid in advsrc:
        # skip bookmarks pointing outside the pushed subset
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
485
492
class pulloperation(object):
    """A object that represent a single pull operation

    It carries pull related state and very common operations.

    A new object should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is None:
            # we pulled everything possible: sync on everything common,
            # plus any remote head not already in the common set
            subset = list(self.common)
            seen = set(subset)
            for head in self.rheads:
                if head not in seen:
                    subset.append(head)
            return subset
        # we pulled a specific subset: sync on that subset only
        return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
552
559
def pull(repo, remote, heads=None, force=False):
    """Pull changes from ``remote`` into ``repo``.

    Returns the changegroup result code (see ``pulloperation.cgresult``).
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # make sure this repo can read everything the source requires
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        usebundle2 = (
            pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp'))
        if usebundle2:
            _pullbundle2(pullop)
        # each step removes itself from todosteps when handled (possibly
        # already done by the bundle2 path above)
        for step, stepfunc in (('changegroup', _pullchangeset),
                               ('phases', _pullphase),
                               ('obsmarkers', _pullobsolete)):
            if step in pullop.todosteps:
                stepfunc(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
581
588
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; may eventually handle
    all discovery."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(), pullop.remote, heads=pullop.heads,
        force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
592
599
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if 'b2x:listkeys' in remotecaps:
        # fix: the pushkey namespace is 'phases' (the record loop below
        # matches `namespace == 'phases'` and _pullphase queries the same
        # name); requesting 'phase' never produced a usable reply
        kwargs['listkeys'] = ['phases']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)
629
636
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    Extensions may mutate ``kwargs`` in place; the default does nothing."""
633
640
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we do not
    # open one for nothing, which would break future useful rollback calls.
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    remote = pullop.remote
    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 remote.url())
665
672
def _pullphase(pullop):
    """fetch the remote's phase data and apply it locally"""
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)
670
677
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if publishing or not remotephases:
        # remote is old (no phase data) or publishing: all common
        # changesets should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
    else:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
688
695
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    A transaction is obtained through ``pullop.gettransaction`` (created
    if necessary) only when markers actually need applying; it is
    returned so calling code knows whether one was created, or None.

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    markerdata = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, markerdata)
            pullop.repo.invalidatevolatilesets()
    return tr
710
717
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    return set(['HG2X', 'bundle2=' + urllib.quote(capsblob)])
717
724
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif bundlecaps is None or 'HG2X' not in bundlecaps:
        # fix: guard bundlecaps=None — a membership test on None raised
        # TypeError instead of the intended ValueError (same guard as the
        # HG10 check just below)
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
764
771
def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle

    The default implementation adds nothing."""
769
776
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    accepted = (['force'], heads, ['hashed', heads_hash])
    if not any(their_heads == candidate for candidate in accepted):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
783
790
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not util.safehasattr(cg, 'params'):
            # plain (bundle10) changegroup
            r = changegroup.addchangegroup(repo, cg, source, url)
        else:
            # bundle2 stream: run under a transaction so hooks can see
            # the pending changes before they become permanent
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True,
                          source=source, url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception as exc:
                # flag the error so remote peers know it happened during
                # bundle2 processing
                exc.duringunbundle2 = True
                raise
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now