##// END OF EJS Templates
bundle2: catch UnknownPartError during local push...
Pierre-Yves David -
r21182:08c84fd9 stable
parent child Browse files
Show More
@@ -1,757 +1,760 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85
11 import util, scmutil, changegroup, base85
12 import discovery, phases, obsolete, bookmarks, bundle2
12 import discovery, phases, obsolete, bookmarks, bundle2
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte bundle header on ``fh`` and return an unbundler.

    ``fname`` may be empty for anonymous streams; a headerless stream
    (first byte NUL) is assumed to be uncompressed HG10 changegroup data.
    When ``vfs`` is given, ``fname`` is joined to it for error reporting.
    Raises ``util.Abort`` for non-Mercurial data or unknown versions.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        # raw changegroup without the "HG" magic: push the bytes we
        # already consumed back in front and treat as uncompressed HG10
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]
    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # compression code follows the header for HG10 bundles
            compression = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, compression)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """State container for a single push operation.

    A fresh instance should be created at the beginning of each push and
    discarded afterward; it carries the state shared between the various
    push steps (discovery, changegroup transfer, phase/obsmarker sync).
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # was --force given?
        self.force = force
        # revs to be pushed (None means "all")
        self.revs = revs
        # may the push create a new named branch on the remote?
        self.newbranch = newbranch
        # did a local lock get acquired? (None until we tried)
        self.locallocked = None
        # integer version of the push result:
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
79
79
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # There are two ways to push to a remote repo:
    #
    # addchangegroup assumes the local user can lock the remote repo
    # (local filesystem, old ssh servers).
    #
    # unbundle assumes the local user cannot lock the remote repo
    # (new ssh servers, http servers).
    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # take the local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # The source repo cannot be locked.  Do not abort the push:
        # just disable the local phase synchronisation.
        pushop.ui.debug('cannot lock source repository: %s\n' % err)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # no unbundle support: we must lock the remote ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                              False)
                    and pushop.remote.capable('bundle2-exp')):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret
153
153
def _pushdiscovery(pushop):
    """Run the discovery protocol and record its outcome on ``pushop``.

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming``.
    """
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
166
166
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set before pushing it.

    Returns False when there is nothing to push. Unless the push is
    forced, aborts when the outgoing set contains obsolete or troubled
    changesets, and runs the head checks (new heads, bookmarks).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # skip the whole scan when the repo has no obsstore at all
        if unfi.obsstore:
            # messages assigned to short names for the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If any missing changeset is obsolete or unstable, at least
            # one of the missing heads is too, so checking only the
            # heads is sufficient.
            for mnode in outgoing.missingheads:
                mctx = unfi[mnode]
                if mctx.obsolete():
                    raise util.Abort(mso % mctx)
                elif mctx.troubled():
                    raise util.Abort(_(mst) % (mctx.troubles()[0], mctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
205
205
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    # Send known heads to the server for push-race detection.
    capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
    caps = bundle2.decodecaps(capsblob)
    bundler = bundle2.bundle20(pushop.ui, caps)
    # advertise our own capabilities so the server can build a reply bundle
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
    if not pushop.force:
        # mandatory part (uppercase name): server must check these heads
        part = bundle2.bundlepart('B2X:CHECK:HEADS',
                                  data=iter(pushop.remoteheads))
        bundler.addpart(part)
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    # Both sending the bundle (local push processes it synchronously) and
    # processing the reply can hit an unknown mandatory part; the two
    # duplicate handlers are merged and the message is now translated,
    # consistent with every other abort in this module.
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
        op = bundle2.processbundle(pushop.repo, reply)
    except bundle2.UnknownPartError as exc:
        raise util.Abort(_('missing support for %s') % exc)
    cgreplies = op.records.getreplies(cgpart.id)
    assert len(cgreplies['changegroup']) == 1
    pushop.ret = cgreplies['changegroup'][0]['return']
    _pushbundle2extrareply(pushop, op, extrainfo)
237
240
238 def _pushbundle2extraparts(pushop, bundler):
241 def _pushbundle2extraparts(pushop, bundler):
239 """hook function to let extensions add parts
242 """hook function to let extensions add parts
240
243
241 Return a dict to let extensions pass data to the reply processing.
244 Return a dict to let extensions pass data to the reply processing.
242 """
245 """
243 return {}
246 return {}
244
247
245 def _pushbundle2extrareply(pushop, op, extrainfo):
248 def _pushbundle2extrareply(pushop, op, extrainfo):
246 """hook function to let extensions react to part replies
249 """hook function to let extensions react to part replies
247
250
248 The dict from _pushbundle2extrareply is fed to this function.
251 The dict from _pushbundle2extrareply is fed to this function.
249 """
252 """
250 pass
253 pass
251
254
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # pushing everything: take the fast path, no push race possible
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # The local repo finds heads on the server and computes what it
        # must push.  Once the revs are transferred, the server aborts if
        # its heads changed meanwhile (someone won a commit/push race).
        remoteheads = ['force'] if pushop.force else pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads, 'push')
    else:
        # we return an integer indicating the remote head count change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                  pushop.repo.url())
291
294
def _pushcomputecommonheads(pushop):
    """Compute the heads common to both sides after the changegroup push.

    Stores the result in ``pushop.commonheads``.
    """
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded: the targets of the push are now common
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # the full push failed: only what was common before remains so
        cheads = pushop.outgoing.commonheads
    else:
        # A partial push failed.  We want
        #   cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out),
        # equivalently
        #   cheads = ( (missingheads and ::commonheads)
        #            + (commonheads and ::missingheads))
        # Discovery already gave us:
        #   common  = (::commonheads)
        #   missing = ((commonheads::missingheads) - commonheads)
        # so we can take the missingheads that are part of common ...
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # ... plus the commonheads parents sitting on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
325
328
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    # NOTE(review): the block below duplicates _pushcomputecommonheads so
    # this function keeps working stand-alone; the dead initial
    # ``cheads = pushop.commonheads`` assignment (every branch below
    # reassigns cheads) has been removed.
    if pushop.ret:
        # push succeeded: synchronize the targets of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # the full push failed: synchronize everything common
        cheads = pushop.outgoing.commonheads
    else:
        # partial push failed; compute
        #   cheads = heads(::missingheads and ::commonheads)
        # see _pushcomputecommonheads for the derivation
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # we may be in the issue 3871 case!  Drop the courtesy phase
        # synchronisation that would publish changesets that may still be
        # draft on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote but public here.
        # XXX Beware that the revset breaks if droots is not strictly
        # XXX roots; we may want to ensure it is, but that is costly.
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
405
408
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
        return
    # The repo is not locked: we must not change any phases.  Just tell
    # the user when an update would have happened.
    moved = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if moved:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
419
422
def _pushobsolete(pushop):
    """Push local obsolescence markers to the remote, when supported."""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if not (obsolete._enabled and repo.obsstore and
            'obsolete' in remote.listkeys('namespaces')):
        return
    results = []
    remotedata = repo.listkeys('obsolete')
    # reverse sort so the transfer ends with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
436
439
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = list(repo.changelog.ancestors(revnums, inclusive=True))
    comparison = bookmarks.compare(repo, repo._bookmarks,
                                   remote.listkeys('bookmarks'),
                                   srchex=hex)
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid) = comparison

    # only bookmarks that advanced on our side are pushed
    for b, scid, dcid in advsrc:
        # when pushing a subset of revs, skip bookmarks outside of it
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
456
459
class pulloperation(object):
    """State holder for a single pull operation.

    Carries pull related state and very common operations. A new instance
    should be created at the beginning of each pull and discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # local repo we pull into
        self.repo = repo
        # remote peer we pull from
        self.remote = remote
        # revisions we try to pull (None means "all")
        self.heads = heads
        # whether the pull is forced
        self.force = force
        # name used for the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # transaction object, created lazily
        self._tr = None
        # changesets common to local and remote before the pull
        self.common = None
        # heads pulled from the remote
        self.rheads = None
        # missing changesets to fetch remotely
        self.fetch = None
        # changegroup application result (used as pull's return code)
        self.cgresult = None
        # steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # a specific subset was pulled: sync on that subset
            return self.heads
        # everything possible was pulled:
        # sync on everything common plus every new remote head
        seen = set(self.common)
        subset = list(self.common)
        subset.extend(node for node in self.rheads if node not in seen)
        return subset

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
523
526
def pull(repo, remote, heads=None, force=False):
    """Pull changes from ``remote`` into ``repo``.

    Returns the changegroup application result (pull's return code)."""
    op = pulloperation(repo, remote, heads, force)
    if op.remote.local():
        # refuse to pull from a local repo whose requirements we lack
        missing = set(op.remote.requirements) - op.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = op.repo.lock()
    try:
        _pulldiscovery(op)
        usebundle2 = (op.repo.ui.configbool('server', 'bundle2', False)
                      and op.remote.capable('bundle2-exp'))
        if usebundle2:
            _pullbundle2(op)
        # fall back to the legacy path for any step bundle2 did not cover
        for step, stepfunc in (('changegroup', _pullchangeset),
                               ('phases', _pullphase),
                               ('obsmarkers', _pullobsolete)):
            if step in op.todosteps:
                stepfunc(op)
        op.closetransaction()
    finally:
        op.releasetransaction()
        lock.release()

    return op.cgresult
552
555
def _pulldiscovery(pullop):
    """Run the discovery phase of a pull.

    Currently handles changeset discovery only; will eventually handle
    all discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(), pullop.remote,
        heads=pullop.heads, force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
563
566
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': set(['HG2X'])}
    # advertise our own bundle2 capabilities to the remote
    capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
    kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
    # pulling changegroup
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        kwargs['common'] = pullop.common
        kwargs['heads'] = pullop.heads or pullop.rheads
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.UnknownPartError as exc:
        # fix: the exception lives in the bundle2 module (only 'bundle2' is
        # imported here); a bare 'UnknownPartError' raised NameError instead
        # of producing this clean abort
        raise util.Abort(_('missing support for %s') % exc)
    assert len(op.records['changegroup']) == 1
    pullop.cgresult = op.records['changegroup'][0]['return']
591
594
592 def _pullbundle2extraprepare(pullop, kwargs):
595 def _pullbundle2extraprepare(pullop, kwargs):
593 """hook function so that extensions can extend the getbundle call"""
596 """hook function so that extensions can extend the getbundle call"""
594 pass
597 pass
595
598
def _pullchangeset(pullop):
    """fetch a changegroup from the remote and add it to the local repo"""
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        # nothing is missing remotely
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    # Open the transaction as late as possible so we don't create one for
    # nothing and don't break a future useful rollback call.
    pullop.gettransaction()
    repo = pullop.repo
    remote = pullop.remote
    if pullop.heads is None and list(pullop.common) == [nullid]:
        repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(repo, cg, 'pull',
                                                 remote.url())
627
630
def _pullphase(pullop):
    """synchronize local phase information with the remote"""
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if not remotephases or publishing:
        # Remote is old or publishing: every common changeset
        # should be seen as public locally.
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
    else:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
646
649
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    Returns the pull transaction when one was created, so the calling code
    knows a new transaction now exists (when applicable); None otherwise.

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    if not obsolete._enabled:
        return None
    repo = pullop.repo
    repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' not in remoteobs:
        return None
    tr = pullop.gettransaction()
    # reverse sort to ensure we end with dump0
    for key in sorted(remoteobs, reverse=True):
        if key.startswith('dump'):
            markers = base85.b85decode(remoteobs[key])
            repo.obsstore.mergemarkers(tr, markers)
    repo.invalidatevolatilesets()
    return tr
668
671
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build changegroup bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
    bundler.addpart(part)
    # fix: forward the actual request arguments to the extension hook;
    # the previous code passed hard-coded Nones, hiding the real
    # heads/common/bundlecaps from extensions adding extra parts
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
702
705
703 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
706 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
704 bundlecaps=None, **kwargs):
707 bundlecaps=None, **kwargs):
705 """hook function to let extensions add parts to the requested bundle"""
708 """hook function to let extensions add parts to the requested bundle"""
706 pass
709 pass
707
710
class PushRaced(RuntimeError):
    """Raised during unbundling when a push race is detected.

    A push race means the repository changed (commit/push/unbundle from
    someone else) between the creation of a bundle and its application.
    """
710
713
def check_heads(repo, their_heads, context):
    """Raise PushRaced when the repo heads no longer match ``their_heads``.

    Used by peer for unbundling. ``their_heads`` is either ['force'], the
    literal list of heads, or ['hashed', sha1-digest-of-sorted-heads].
    """
    current = repo.heads()
    digest = util.sha1(''.join(sorted(current))).digest()
    unchanged = (their_heads == ['force']
                 or their_heads == current
                 or their_heads == ['hashed', digest])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise PushRaced('repository changed while %s - '
                        'please try again' % context)
724
727
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        # raises PushRaced if heads moved since the bundle was built
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # a bundle2 stream (only bundle2 unbundlers carry .params):
            # process it inside a single transaction so hooks can see the
            # pending changes before they are committed
            tr = repo.transaction('unbundle')
            tr.hookargs['bundle2-exp'] = '1'
            r = bundle2.processbundle(repo, cg, lambda: tr).reply
            cl = repo.unfiltered().changelog
            # expose pending changelog writes to the pre-close hook
            # (empty string when nothing is pending)
            p = cl.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, source=source,
                      url=url, pending=p, **tr.hookargs)
            tr.close()
            repo.hook('b2x-transactionclose', source=source, url=url,
                      **tr.hookargs)
        else:
            # legacy HG10 changegroup stream
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        # release() rolls the transaction back unless tr.close() ran above
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now