##// END OF EJS Templates
push: add a ``pushop.stepsdone`` attribute...
Pierre-Yves David -
r21901:8612c4ab default
parent child Browse files
Show More
@@ -1,803 +1,806 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    ``fname`` is used for error reporting (``"stream"`` when absent) and is
    joined against ``vfs`` when one is given.  A headerless changegroup
    stream (starting with ``\\0``) is transparently wrapped as an
    uncompressed HG10 bundle.  Raises ``util.Abort`` for non-Mercurial data
    or an unknown bundle version.
    """
    hdr = changegroup.readexactly(fh, 4)

    compression = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        if hdr.startswith('\0') and not hdr.startswith('HG'):
            # raw changegroup with no bundle header: re-attach the bytes we
            # already consumed and treat it as an uncompressed HG10 bundle
            fh = changegroup.headerlessfixup(fh, hdr)
            hdr = "HG10"
            compression = 'UN'

    magic = hdr[0:2]
    version = hdr[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # compression scheme follows the 4 byte header
            compression = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, compression)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
79
82
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # refuse to push to a repo missing features we require
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repo
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                # use bundle2 only when locally enabled (experimental) and
                # advertised by the server; otherwise fall back to the
                # classic changegroup push
                if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                              False)
                    and pushop.remote.capable('bundle2-exp')):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
            # phase/obsolescence exchange happens even when no changeset
            # was pushed
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    # bookmarks are exchanged outside of the locks
    _pushbookmark(pushop)
    return pushop.ret
153
156
def _pushdiscovery(pushop):
    """Run common/outgoing discovery against the remote.

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming`` for later push steps.
    """
    unfiltered = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfiltered, pushop.remote,
                                             force=pushop.force)
    common, incoming, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfiltered, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = incoming
166
169
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Return False when there is nothing to push.  Otherwise, unless
    ``--force`` was given, abort on outgoing obsolete/troubled heads and run
    the standard new-remote-head checks, then return True.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    # mst is interpolated with the trouble name at runtime
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        # bookmarks configured to be pushed may legitimately create new
        # remote heads
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
205
208
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    Adds the changegroup part (and, unless forced, a heads check part) to
    ``bundler`` and returns a callback to run on the server reply.
    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    # Send known heads to the server for race detection.
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        # the server replies exactly once to our single changegroup part
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply
222
225
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.newpart('b2x:replycaps', data=capsblob)
    # let extensions add their own parts; the returned dict is handed back
    # to them when the reply is processed
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cgreplyhandler = _pushb2ctx(pushop, bundler)
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        # the remote rejected a part it does not understand
        raise util.Abort('missing support for %s' % exc)
    try:
        # process the server reply locally (may itself contain parts we
        # do not support)
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    cgreplyhandler(op)
    _pushbundle2extrareply(pushop, op, extrainfo)
246
249
247 def _pushbundle2extraparts(pushop, bundler):
250 def _pushbundle2extraparts(pushop, bundler):
248 """hook function to let extensions add parts
251 """hook function to let extensions add parts
249
252
250 Return a dict to let extensions pass data to the reply processing.
253 Return a dict to let extensions pass data to the reply processing.
251 """
254 """
252 return {}
255 return {}
253
256
def _pushbundle2extrareply(pushop, op, extrainfo):
    """hook function to let extensions react to part replies

    The dict from _pushbundle2extraparts is fed to this function.
    """
    pass
260
263
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Stores the remote's addchangegroup()/unbundle() result in
    ``pushop.ret``.
    """
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            # 'force' disables the remote-side race check
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
300
303
def _pushcomputecommonheads(pushop):
    """Compute the heads common to both sides after the changeset push.

    The result, stored in ``pushop.commonheads``, is used as the reference
    set for the subsequent phase synchronisation.
    """
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
334
337
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote phase information to the local repo, then
    advertises local public heads to the remote, either through batched
    bundle2 ``b2x:pushkey`` parts or individual pushkey calls.
    """
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)

        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
            bundler.newpart('b2x:replycaps', data=capsblob)
            # remember which part moved which node so replies can be matched
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                # at most one reply per pushkey part
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independant pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
421
424
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
        return
    # The source repo could not be locked, so no phase may be changed.
    # Just tell the user when some nodes would actually have moved.
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    if wouldmove:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
435
438
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if not (obsolete._enabled and repo.obsstore
            and 'obsolete' in remote.listkeys('namespaces')):
        return
    # content of the local 'obsolete' pushkey namespace, mirrored remotely
    markerdata = repo.listkeys('obsolete')
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', markerdata[key])
               for key in sorted(markerdata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
452
455
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    # every revision reachable from the pushed heads (inclusive)
    ancestors = list(repo.changelog.ancestors(revnums, inclusive=True))
    comparison = bookmarks.compare(repo, repo._bookmarks,
                                   remote.listkeys('bookmarks'),
                                   srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comparison

    # only bookmarks that advanced locally are candidates for update
    for book, srcnode, dstnode in advsrc:
        # skip bookmarks pointing outside of the pushed subset
        if ancestors and repo[srcnode].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', book, dstnode, srcnode):
            ui.status(_("updating bookmark %s\n") % book)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % book)
472
475
class pulloperation(object):
    """A object that represent a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
539
542
def pull(repo, remote, heads=None, force=False):
    """pull changesets (and related data) from *remote* into *repo*

    Returns the changegroup result code, used as the exit status of pull.
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # local-to-local pull: the source repo must not require features
        # the destination does not understand
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        useb2 = (pullop.repo.ui.configbool('experimental', 'bundle2-exp',
                                           False)
                 and pullop.remote.capable('bundle2-exp'))
        if useb2:
            _pullbundle2(pullop)
        # whatever bundle2 did not handle is performed the legacy way
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
568
571
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will eventually handle
    all discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(), pullop.remote,
        heads=pullop.heads, force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
579
582
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if 'b2x:listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    elif pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    stream = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, stream, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # exactly one changegroup part is expected in the reply
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)
616
619
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
620
623
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    repo = pullop.repo
    remote = pullop.remote
    # Delay opening the transaction as long as possible: an empty pull
    # should not open one at all, or it would break future useful
    # rollback calls.
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = remote.getbundle('pull', common=pullop.common,
                              heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(repo, cg, 'pull',
                                                 remote.url())
652
655
def _pullphase(pullop):
    # fetch the remote phase data and apply the resulting movement locally
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
657
660
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    repo = pullop.repo
    subset = pullop.pulledsubset
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote supports phases and is non-publishing: mirror its public
        # heads, everything else in the pulled subset becomes draft
        publicheads, _dropped = phases.analyzeremotephases(repo, subset,
                                                           remotephases)
        phases.advanceboundary(repo, phases.public, publicheads)
        phases.advanceboundary(repo, phases.draft, subset)
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        phases.advanceboundary(repo, phases.public, subset)
675
678
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The pull transaction (from ``pullop.gettransaction``) is only opened when
    the remote actually advertises marker data. That transaction (or None) is
    returned to inform the calling code that a new transaction may have been
    created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is always present when the remote has any markers
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
697
700
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise the experimental bundle2 header plus our encoded capabilities
    blob = bundle2.encodecaps(repo.bundle2caps)
    return set(['HG2X', 'bundle2=' + urllib.quote(blob)])
704
707
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    The result is an HG10 stream unless the ``HG2X`` capability is present
    in *bundlecaps*, in which case an HG2X bundle (possibly carrying several
    parts) is produced. For now the bundle can only contain changegroup and
    listkeys parts; this will change as more part types become available
    for bundle2.

    This differs from changegroup.getbundle, which only ever returns an HG10
    changegroup bundle. The two may eventually get reunited once we have a
    clearer idea of the API used to query different kinds of data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # the changegroup is built unconditionally: it is the core payload
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # plain HG10 requested: extra arguments cannot be honoured
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    clientcaps = {}
    for cap in bundlecaps:
        if cap.startswith('bundle2='):
            blob = urllib.unquote(cap[len('bundle2='):])
            clientcaps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, clientcaps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    # one listkeys part per requested namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
747
750
def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
752
755
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = util.sha1(''.join(sorted(current))).digest()
    # the client may send the literal heads, a hash of them, or 'force'
    unchanged = (their_heads == ['force']
                 or their_heads == current
                 or their_heads == ['hashed', digest])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
766
769
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # a transaction is only needed when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = repo.root if cl.writepending() else ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception as exc:
                # flag the failure so the remote side knows it happened
                # while processing a bundle2 payload
                exc.duringunbundle2 = True
                raise
        else:
            # plain changegroup (bundle10) stream
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now