##// END OF EJS Templates
bundle2: add a way to add parts during a `getbundle` request...
Pierre-Yves David -
r21158:8f6530b6 default
parent child Browse files
Show More
@@ -1,745 +1,752 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85
11 import util, scmutil, changegroup, base85
12 import discovery, phases, obsolete, bookmarks, bundle2
12 import discovery, phases, obsolete, bookmarks, bundle2
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Inspect the first bytes of ``fh`` and return a matching unbundler.

    ``fname`` may be empty for an anonymous stream; ``vfs``, when given,
    is used to resolve the file name for error reporting.
    """
    # peek at the four byte magic/version header
    header = changegroup.readexactly(fh, 4)

    compression = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        # A raw, headerless changegroup starts with a NUL byte; wrap it so
        # it reads like an uncompressed HG10 bundle.
        if header.startswith('\0') and not header.startswith('HG'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # a two byte compression marker follows the HG10 header
            compression = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, compression)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
40
40
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    # when pushing between two local repos, refuse early if the destination
    # does not understand all of the source's requirements
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        # record the failure so _localphasemove knows to skip phase moves
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                # prefer the experimental bundle2 exchange when both sides
                # opted in; fall back to the classic changegroup push
                if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                              False)
                    and pushop.remote.capable('bundle2-exp')):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
                _pushcomputecommonheads(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    # bookmarks are exchanged outside of the repo locks
    _pushbookmark(pushop)
    return pushop.ret
153
153
def _pushdiscovery(pushop):
    """Run common/outgoing discovery against the remote.

    The results are recorded on ``pushop`` (``outgoing``, ``remoteheads``
    and ``incoming``) for later steps to consume.
    """
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
166
166
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set before pushing.

    Returns False when there is nothing to push.  Unless --force was
    given, aborts when the outgoing heads contain obsolete or troubled
    changesets, or when the push would create new remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    # mst is interpolated with the specific trouble name
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        # abort if this push would create new heads on the remote
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
205
205
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    # Send known head to the server for race detection.
    capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
    caps = bundle2.decodecaps(capsblob)
    bundler = bundle2.bundle20(pushop.ui, caps)
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
    if not pushop.force:
        # ask the server to verify our known heads are still its heads
        # (commit/push race detection)
        part = bundle2.bundlepart('B2X:CHECK:HEADS',
                                  data=iter(pushop.remoteheads))
        bundler.addpart(part)
    # let extensions insert their own parts before the changegroup
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    reply = pushop.remote.unbundle(stream, ['force'], 'push')
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except KeyError, exc:
        # the reply contained a part type we do not know how to process
        raise util.Abort('missing support for %s' % exc)
    # the server replies once per changegroup part; we sent exactly one
    cgreplies = op.records.getreplies(cgpart.id)
    assert len(cgreplies['changegroup']) == 1
    pushop.ret = cgreplies['changegroup'][0]['return']
    _pushbundle2extrareply(pushop, op, extrainfo)
237
237
238 def _pushbundle2extraparts(pushop, bundler):
238 def _pushbundle2extraparts(pushop, bundler):
239 """hook function to let extensions add parts
239 """hook function to let extensions add parts
240
240
241 Return a dict to let extensions pass data to the reply processing.
241 Return a dict to let extensions pass data to the reply processing.
242 """
242 """
243 return {}
243 return {}
244
244
245 def _pushbundle2extrareply(pushop, op, extrainfo):
245 def _pushbundle2extrareply(pushop, op, extrainfo):
246 """hook function to let extensions react to part replies
246 """hook function to let extensions react to part replies
247
247
248 The dict from _pushbundle2extrareply is fed to this function.
248 The dict from _pushbundle2extrareply is fed to this function.
249 """
249 """
250 pass
250 pass
251
251
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        # partial push (or filtered/excluded revs): build the bundle the
        # slow, general way
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
291
291
def _pushcomputecommonheads(pushop):
    """Compute the set of heads common to both sides after the push.

    The result is stored in ``pushop.commonheads`` and is later used for
    phase synchronisation.
    """
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
325
325
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Relies on ``pushop.commonheads`` being populated beforehand by
    ``_pushcomputecommonheads`` (``push()`` always runs it right before
    this function).  The previous body recomputed the exact same value
    in place, immediately overwriting ``pushop.commonheads``; that
    duplicated dead code is removed here.
    """
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
405
405
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
        return
    # The repo is not locked, so phases must not be changed.  Inform the
    # user, but only when the call would actually have moved something.
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
419
419
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if not (obsolete._enabled and repo.obsstore and
            'obsolete' in remote.listkeys('namespaces')):
        return
    # NOTE: despite being destined for the remote, this data comes from the
    # *local* obsstore, exposed through the repo's own pushkey namespace.
    markerdata = repo.listkeys('obsolete')
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(markerdata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', markerdata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
436
436
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = list(repo.changelog.ancestors(revnums, inclusive=True))
    comparison = bookmarks.compare(repo, repo._bookmarks,
                                   remote.listkeys('bookmarks'), srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comparison

    # only bookmarks that advanced on our side are pushed; when specific
    # revs were requested, skip bookmarks pointing outside their ancestors
    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
456
456
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # set of steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is not None:
            # We pulled a specific subset: sync on this subset.
            return self.heads
        # We pulled everything possible: sync on everything common plus
        # the remote heads we did not already have.
        known = set(self.common)
        subset = list(self.common)
        subset.extend(h for h in self.rheads if h not in known)
        return subset

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
523
523
def pull(repo, remote, heads=None, force=False):
    """Pull changesets (and related data) from *remote* into *repo*.

    Returns the changegroup result code, as used by the pull command.
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        unsupported = set(pullop.remote.requirements) - pullop.repo.supported
        if unsupported:
            raise util.Abort(_("required features are not"
                               " supported in the destination:"
                               " %s") % (', '.join(sorted(unsupported))))

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        # try the experimental bundle2 protocol first when both sides
        # opt in; it removes steps from pullop.todosteps as it handles them
        if (pullop.repo.ui.configbool('server', 'bundle2', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # run whatever steps bundle2 did not already take care of
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
552
552
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(), pullop.remote,
        heads=pullop.heads, force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
563
563
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    # advertise bundle2 support and our own capabilities to the remote
    kwargs = {'bundlecaps': set(['HG2X'])}
    capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
    kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
    # pulling changegroup
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        kwargs['common'] = pullop.common
        kwargs['heads'] = pullop.heads or pullop.rheads
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    # NOTE(review): 'format' is never placed in kwargs above, so this
    # early-out looks dead as written -- presumably a leftover from an
    # earlier protocol draft; confirm before relying on it.
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except KeyError, exc:
        # an unknown mandatory part type surfaces as a KeyError
        raise util.Abort('missing support for %s' % exc)
    # exactly one changegroup part is expected in the reply for now
    assert len(op.records['changegroup']) == 1
    pullop.cgresult = op.records['changegroup'][0]['return']
590
590
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # The transaction is opened as late as possible so we never open one
    # for nothing, which would break future useful rollback calls.
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    remote = pullop.remote
    # pick the richest changegroup protocol the remote supports
    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 remote.url())
622
622
def _pullphase(pullop):
    """update local phase information from the remote 'phases' namespace"""
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if not remotephases or publishing:
        # Remote is old or publishing: all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
    else:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
641
641
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            # reverse sort so we finish with dump0
            for key in sorted(remoteobs, reverse=True):
                if not key.startswith('dump'):
                    continue
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
663
663
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this
    will change when more part types are available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when
    we have a clearer idea of the API we want to use to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    # plain HG10 bundle unless the client advertised bundle2 support
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
    bundler.addpart(part)
    # Forward the actual request parameters to the extension hook.
    # (Previously this passed hard-coded heads=None, common=None,
    # bundlecaps=None, shadowing the real arguments so extensions could
    # never see what was requested.)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
695
697
def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle

    Default implementation is a no-op; extensions wrap this to append
    extra bundle2 parts to *bundler* during a `getbundle` request."""
    pass
702
class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicates a push race

    (i.e. the repository changed between the creation of a bundle and its
    application)."""
698
705
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current_heads = repo.heads()
    current_hash = util.sha1(''.join(sorted(current_heads))).digest()
    # the client may send the literal heads, a hash of them, or 'force'
    unchanged = (their_heads == ['force']
                 or their_heads == current_heads
                 or their_heads == ['hashed', current_hash])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise PushRaced('repository changed while %s - '
                        'please try again' % context)
718 'please try again' % context)
712
719
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the
    creation of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # a 'params' attribute marks a bundle2 stream
            tr = repo.transaction('unbundle')
            tr.hookargs['bundle2-exp'] = '1'
            r = bundle2.processbundle(repo, cg, lambda: tr).reply
            cl = repo.unfiltered().changelog
            # expose pending changelog writes to the pre-close hook;
            # empty string means "nothing pending"
            p = cl.writepending() and repo.root or ""
            # hook ordering matters: pretransactionclose may still abort
            # (throw=True) before tr.close() makes the changes permanent
            repo.hook('b2x-pretransactionclose', throw=True, source=source,
                      url=url, pending=p, **tr.hookargs)
            tr.close()
            repo.hook('b2x-transactionclose', source=source, url=url,
                      **tr.hookargs)
        else:
            # legacy HG10 changegroup stream
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
752 return r
General Comments 0
You need to be logged in to leave comments. Login now