##// END OF EJS Templates
bundle2: transmit capabilities to getbundle during pull...
Pierre-Yves David -
r21143:5bb5d4ba default
parent child Browse files
Show More
@@ -1,710 +1,717
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85
11 import util, scmutil, changegroup, base85
12 import discovery, phases, obsolete, bookmarks, bundle2
12 import discovery, phases, obsolete, bookmarks, bundle2
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Peek at the header of a bundle stream and return an unbundler.

    ``fname`` is only used for error reporting; an empty name means an
    anonymous stream.  A stream that lacks the ``HG`` magic but starts
    with a NUL byte is repaired as a headerless, uncompressed HG10
    bundle.  When ``vfs`` is provided, ``fname`` is resolved against it.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # put the consumed bytes back and assume uncompressed HG10
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # compression algorithm follows the header unless already forced
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    if version == '20':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
79
79
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # local-to-local push: the destination must understand every
        # requirement of the source repository before we send anything
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                # prefer the bundle2 protocol when the server offers it
                if pushop.remote.capable('bundle2'):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
            # phase/obsmarker sync runs even when nothing was pushed
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    # bookmarks are exchanged outside of the locks
    _pushbookmark(pushop)
    return pushop.ret
151
151
def _pushdiscovery(pushop):
    """Run common/outgoing discovery and record the results on pushop."""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    # commoninc is (common, incoming-flag, remoteheads)
    pushop.remoteheads = commoninc[2]
    pushop.incoming = commoninc[1]
164
164
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push.  Unless --force was
    given, aborts if any outgoing head is obsolete or troubled, or if
    the push would create unexpected new remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are kept short for 80-char-limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    # mst is formatted with the specific trouble name
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
203
203
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    # Send known head to the server for race detection.
    # server capabilities are advertised as a url-quoted blob
    capsblob = urllib.unquote(pushop.remote.capable('bundle2'))
    caps = bundle2.decodecaps(capsblob)
    bundler = bundle2.bundle20(pushop.ui, caps)
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.addpart(bundle2.bundlepart('replycaps', data=capsblob))
    if not pushop.force:
        # ask the server to check its heads still match ours (race detection)
        part = bundle2.bundlepart('CHECK:HEADS', data=iter(pushop.remoteheads))
        bundler.addpart(part)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    reply = pushop.remote.unbundle(stream, ['force'], 'push')
    try:
        # process the server reply bundle locally (e.g. reply parts)
        op = bundle2.processbundle(pushop.repo, reply)
    except KeyError, exc:
        # an unknown mandatory part in the reply surfaces as KeyError
        raise util.Abort('missing support for %s' % exc)
    # extract the addchangegroup-style return value from the reply records
    cgreplies = op.records.getreplies(cgpart.id)
    assert len(cgreplies['changegroup']) == 1
    pushop.ret = cgreplies['changegroup'][0]['return']
232
232
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    fastpathok = (pushop.revs is None
                  and not outgoing.excluded
                  and not pushop.repo.changelog.filteredrevs)
    if fastpathok:
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo, outgoing, bundler, 'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if not unbundle:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
        return
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    remoteheads = ['force'] if pushop.force else pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.ret = pushop.remote.unbundle(cg, remoteheads, 'push')
272
272
def _pushcomputecommonheads(pushop):
    """Compute the heads common to source and destination after the push.

    The result is stored in pushop.commonheads and is later used for
    phase synchronisation.
    """
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
306
306
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Relies on pushop.commonheads, which push() always fills in via
    _pushcomputecommonheads() right before calling this function.  The
    previous body duplicated that whole computation verbatim and then
    overwrote pushop.commonheads with the identical value; the duplicate
    has been removed.
    """
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # remote publishes everything: all common heads become public
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
386
386
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
        return
    # We could not lock the local repo, so no phase may be changed.
    # Inform the user when a move would actually have happened.
    skipped = [n for n in nodes if phase < pushop.repo[n].phase()]
    if skipped:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
400
400
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if not (obsolete._enabled and repo.obsstore and
            'obsolete' in remote.listkeys('namespaces')):
        return
    markers = repo.listkeys('obsolete')
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', markers[key])
               for key in sorted(markers, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
417
417
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = list(repo.changelog.ancestors(revnums, inclusive=True))
    # compare() returns (addsrc, adddst, advsrc, advdst, diverge, differ,
    # invalid); only bookmarks that advanced locally (advsrc) are pushed.
    comp = bookmarks.compare(repo, repo._bookmarks,
                             remote.listkeys('bookmarks'), srchex=hex)
    advsrc = comp[2]
    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            # bookmark target is outside the pushed set; leave it alone
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
437
437
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
470
470
471 @util.propertycache
471 @util.propertycache
472 def pulledsubset(self):
472 def pulledsubset(self):
473 """heads of the set of changeset target by the pull"""
473 """heads of the set of changeset target by the pull"""
474 # compute target subset
474 # compute target subset
475 if self.heads is None:
475 if self.heads is None:
476 # We pulled every thing possible
476 # We pulled every thing possible
477 # sync on everything common
477 # sync on everything common
478 c = set(self.common)
478 c = set(self.common)
479 ret = list(self.common)
479 ret = list(self.common)
480 for n in self.rheads:
480 for n in self.rheads:
481 if n not in c:
481 if n not in c:
482 ret.append(n)
482 ret.append(n)
483 return ret
483 return ret
484 else:
484 else:
485 # We pulled a specific subset
485 # We pulled a specific subset
486 # sync on this subset
486 # sync on this subset
487 return self.heads
487 return self.heads
488
488
489 def gettransaction(self):
489 def gettransaction(self):
490 """get appropriate pull transaction, creating it if needed"""
490 """get appropriate pull transaction, creating it if needed"""
491 if self._tr is None:
491 if self._tr is None:
492 self._tr = self.repo.transaction(self._trname)
492 self._tr = self.repo.transaction(self._trname)
493 return self._tr
493 return self._tr
494
494
495 def closetransaction(self):
495 def closetransaction(self):
496 """close transaction if created"""
496 """close transaction if created"""
497 if self._tr is not None:
497 if self._tr is not None:
498 self._tr.close()
498 self._tr.close()
499
499
500 def releasetransaction(self):
500 def releasetransaction(self):
501 """release transaction if created"""
501 """release transaction if created"""
502 if self._tr is not None:
502 if self._tr is not None:
503 self._tr.release()
503 self._tr.release()
504
504
def pull(repo, remote, heads=None, force=False):
    """pull changesets (and related data) from a remote repo into ``repo``

    ``heads`` restricts the pull to these remote heads (None means all).
    ``force`` bypasses the usual related-repository safety checks during
    discovery. Returns the changegroup application result code.
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # for a local peer we can check requirements up front and fail
        # with a readable message instead of a low-level error later
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        # bundle2 handles several steps at once when the remote supports it;
        # whatever it does not handle remains in pullop.todosteps and is
        # picked up by the legacy per-step functions below.
        if pullop.remote.capable('bundle2'):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
532
532
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle
    all discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    # unpack into: common changesets, missing changesets to fetch,
    # and the remote heads
    pullop.common, pullop.fetch, pullop.rheads = tmp
543
543
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': set(['HG20'])}
    # advertise our own bundle2 capabilities to the remote getbundle call
    capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
    kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
    # pulling changegroup
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        kwargs['common'] = pullop.common
        kwargs['heads'] = pullop.heads or pullop.rheads
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    # `except E as e` form: valid since Python 2.6 and forward-compatible
    except KeyError as exc:
        raise util.Abort('missing support for %s' % exc)
    assert len(op.records['changegroup']) == 1
    pullop.cgresult = op.records['changegroup'][0]['return']
568
570
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # capability-based fallback chain: getbundle (modern), full
    # changegroup (heads is None), or changegroupsubset (partial pull)
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
600
602
def _pullphase(pullop):
    """pull phase information from the remote and apply it locally"""
    # Get remote phases data from remote
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing: honor its per-root phase data
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing: all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
619
621
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` function returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            # only open a transaction when there is actually data to apply
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
641
643
def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getbundle, which only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when
    we have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG20' not in bundlecaps:
        # caller did not request bundle2: return the plain changegroup
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    # Decode the bundle2 capabilities the client advertised through the
    # 'bundle2=' bundlecaps entry (percent-encoded capabilities blob).
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    part = bundle2.bundlepart('changegroup', data=cg.getchunks())
    bundler.addpart(part)
    return util.chunkbuffer(bundler.getchunks())
667
674
class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicates a push race"""
670
677
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling. Raises PushRaced when the repository's
    heads no longer match what the client saw (``their_heads``), unless
    the client forced the push. ``context`` is interpolated into the
    error message (e.g. 'uploading changes').
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    # clients may send the literal heads, a hash of them, or 'force'
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise PushRaced('repository changed while %s - '
                        'please try again' % context)
684
691
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # presence of 'params' marks a bundle2 stream
            tr = repo.transaction('unbundle')
            r = bundle2.processbundle(repo, cg, lambda: tr).reply
            tr.close()
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now