##// END OF EJS Templates
push: restore contents of HG_URL for hooks (issue4268)
Matt Mackall -
r21761:b2dc026a stable
parent child Browse files
Show More
@@ -1,765 +1,765
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2
12 import discovery, phases, obsolete, bookmarks, bundle2
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle data readable from ``fh``.

    ``fname`` is used for error messages ("stream" when empty); when
    ``vfs`` is provided, ``fname`` is joined to the vfs root for display.
    A headerless stream (first byte '\\0') is assumed to be an
    uncompressed HG10 changegroup.  Raises util.Abort when the data is
    not a Mercurial bundle or the bundle version is unknown.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # headerless bundle: fix up the stream and assume
            # an uncompressed ('UN') HG10 changegroup
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression algorithm follows the HG10 magic
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
79
79
80 def push(repo, remote, force=False, revs=None, newbranch=False):
80 def push(repo, remote, force=False, revs=None, newbranch=False):
81 '''Push outgoing changesets (limited by revs) from a local
81 '''Push outgoing changesets (limited by revs) from a local
82 repository to remote. Return an integer:
82 repository to remote. Return an integer:
83 - None means nothing to push
83 - None means nothing to push
84 - 0 means HTTP error
84 - 0 means HTTP error
85 - 1 means we pushed and remote head count is unchanged *or*
85 - 1 means we pushed and remote head count is unchanged *or*
86 we have outgoing changesets but refused to push
86 we have outgoing changesets but refused to push
87 - other values as described by addchangegroup()
87 - other values as described by addchangegroup()
88 '''
88 '''
89 pushop = pushoperation(repo, remote, force, revs, newbranch)
89 pushop = pushoperation(repo, remote, force, revs, newbranch)
90 if pushop.remote.local():
90 if pushop.remote.local():
91 missing = (set(pushop.repo.requirements)
91 missing = (set(pushop.repo.requirements)
92 - pushop.remote.local().supported)
92 - pushop.remote.local().supported)
93 if missing:
93 if missing:
94 msg = _("required features are not"
94 msg = _("required features are not"
95 " supported in the destination:"
95 " supported in the destination:"
96 " %s") % (', '.join(sorted(missing)))
96 " %s") % (', '.join(sorted(missing)))
97 raise util.Abort(msg)
97 raise util.Abort(msg)
98
98
99 # there are two ways to push to remote repo:
99 # there are two ways to push to remote repo:
100 #
100 #
101 # addchangegroup assumes local user can lock remote
101 # addchangegroup assumes local user can lock remote
102 # repo (local filesystem, old ssh servers).
102 # repo (local filesystem, old ssh servers).
103 #
103 #
104 # unbundle assumes local user cannot lock remote repo (new ssh
104 # unbundle assumes local user cannot lock remote repo (new ssh
105 # servers, http servers).
105 # servers, http servers).
106
106
107 if not pushop.remote.canpush():
107 if not pushop.remote.canpush():
108 raise util.Abort(_("destination does not support push"))
108 raise util.Abort(_("destination does not support push"))
109 # get local lock as we might write phase data
109 # get local lock as we might write phase data
110 locallock = None
110 locallock = None
111 try:
111 try:
112 locallock = pushop.repo.lock()
112 locallock = pushop.repo.lock()
113 pushop.locallocked = True
113 pushop.locallocked = True
114 except IOError, err:
114 except IOError, err:
115 pushop.locallocked = False
115 pushop.locallocked = False
116 if err.errno != errno.EACCES:
116 if err.errno != errno.EACCES:
117 raise
117 raise
118 # source repo cannot be locked.
118 # source repo cannot be locked.
119 # We do not abort the push, but just disable the local phase
119 # We do not abort the push, but just disable the local phase
120 # synchronisation.
120 # synchronisation.
121 msg = 'cannot lock source repository: %s\n' % err
121 msg = 'cannot lock source repository: %s\n' % err
122 pushop.ui.debug(msg)
122 pushop.ui.debug(msg)
123 try:
123 try:
124 pushop.repo.checkpush(pushop)
124 pushop.repo.checkpush(pushop)
125 lock = None
125 lock = None
126 unbundle = pushop.remote.capable('unbundle')
126 unbundle = pushop.remote.capable('unbundle')
127 if not unbundle:
127 if not unbundle:
128 lock = pushop.remote.lock()
128 lock = pushop.remote.lock()
129 try:
129 try:
130 _pushdiscovery(pushop)
130 _pushdiscovery(pushop)
131 if _pushcheckoutgoing(pushop):
131 if _pushcheckoutgoing(pushop):
132 pushop.repo.prepushoutgoinghooks(pushop.repo,
132 pushop.repo.prepushoutgoinghooks(pushop.repo,
133 pushop.remote,
133 pushop.remote,
134 pushop.outgoing)
134 pushop.outgoing)
135 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
135 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
136 False)
136 False)
137 and pushop.remote.capable('bundle2-exp')):
137 and pushop.remote.capable('bundle2-exp')):
138 _pushbundle2(pushop)
138 _pushbundle2(pushop)
139 else:
139 else:
140 _pushchangeset(pushop)
140 _pushchangeset(pushop)
141 _pushcomputecommonheads(pushop)
141 _pushcomputecommonheads(pushop)
142 _pushsyncphase(pushop)
142 _pushsyncphase(pushop)
143 _pushobsolete(pushop)
143 _pushobsolete(pushop)
144 finally:
144 finally:
145 if lock is not None:
145 if lock is not None:
146 lock.release()
146 lock.release()
147 finally:
147 finally:
148 if locallock is not None:
148 if locallock is not None:
149 locallock.release()
149 locallock.release()
150
150
151 _pushbookmark(pushop)
151 _pushbookmark(pushop)
152 return pushop.ret
152 return pushop.ret
153
153
def _pushdiscovery(pushop):
    """Run discovery against the remote and record the results on pushop.

    Fills in pushop.outgoing, pushop.remoteheads and pushop.incoming.
    """
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
166
166
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push.  Unless the push is
    forced, aborts when the push would propagate obsolete or troubled
    (unstable/bumped/divergent) changesets, or would create new remote
    heads (checked via discovery.checkheads).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
205
205
206 def _pushbundle2(pushop):
206 def _pushbundle2(pushop):
207 """push data to the remote using bundle2
207 """push data to the remote using bundle2
208
208
209 The only currently supported type of data is changegroup but this will
209 The only currently supported type of data is changegroup but this will
210 evolve in the future."""
210 evolve in the future."""
211 # Send known head to the server for race detection.
211 # Send known head to the server for race detection.
212 capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
212 capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
213 caps = bundle2.decodecaps(capsblob)
213 caps = bundle2.decodecaps(capsblob)
214 bundler = bundle2.bundle20(pushop.ui, caps)
214 bundler = bundle2.bundle20(pushop.ui, caps)
215 # create reply capability
215 # create reply capability
216 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
216 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
217 bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
217 bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
218 if not pushop.force:
218 if not pushop.force:
219 part = bundle2.bundlepart('B2X:CHECK:HEADS',
219 part = bundle2.bundlepart('B2X:CHECK:HEADS',
220 data=iter(pushop.remoteheads))
220 data=iter(pushop.remoteheads))
221 bundler.addpart(part)
221 bundler.addpart(part)
222 extrainfo = _pushbundle2extraparts(pushop, bundler)
222 extrainfo = _pushbundle2extraparts(pushop, bundler)
223 # add the changegroup bundle
223 # add the changegroup bundle
224 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
224 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
225 cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
225 cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
226 bundler.addpart(cgpart)
226 bundler.addpart(cgpart)
227 stream = util.chunkbuffer(bundler.getchunks())
227 stream = util.chunkbuffer(bundler.getchunks())
228 try:
228 try:
229 reply = pushop.remote.unbundle(stream, ['force'], 'push')
229 reply = pushop.remote.unbundle(stream, ['force'], 'push')
230 except bundle2.UnknownPartError, exc:
230 except bundle2.UnknownPartError, exc:
231 raise util.Abort('missing support for %s' % exc)
231 raise util.Abort('missing support for %s' % exc)
232 try:
232 try:
233 op = bundle2.processbundle(pushop.repo, reply)
233 op = bundle2.processbundle(pushop.repo, reply)
234 except bundle2.UnknownPartError, exc:
234 except bundle2.UnknownPartError, exc:
235 raise util.Abort('missing support for %s' % exc)
235 raise util.Abort('missing support for %s' % exc)
236 cgreplies = op.records.getreplies(cgpart.id)
236 cgreplies = op.records.getreplies(cgpart.id)
237 assert len(cgreplies['changegroup']) == 1
237 assert len(cgreplies['changegroup']) == 1
238 pushop.ret = cgreplies['changegroup'][0]['return']
238 pushop.ret = cgreplies['changegroup'][0]['return']
239 _pushbundle2extrareply(pushop, op, extrainfo)
239 _pushbundle2extrareply(pushop, op, extrainfo)
240
240
241 def _pushbundle2extraparts(pushop, bundler):
241 def _pushbundle2extraparts(pushop, bundler):
242 """hook function to let extensions add parts
242 """hook function to let extensions add parts
243
243
244 Return a dict to let extensions pass data to the reply processing.
244 Return a dict to let extensions pass data to the reply processing.
245 """
245 """
246 return {}
246 return {}
247
247
248 def _pushbundle2extrareply(pushop, op, extrainfo):
248 def _pushbundle2extrareply(pushop, op, extrainfo):
249 """hook function to let extensions react to part replies
249 """hook function to let extensions react to part replies
250
250
251 The dict from _pushbundle2extrareply is fed to this function.
251 The dict from _pushbundle2extrareply is fed to this function.
252 """
252 """
253 pass
253 pass
254
254
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        # pass the source url so remote hooks see it in HG_URL
        # (issue4268)
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
294
294
def _pushcomputecommonheads(pushop):
    """Compute the heads common to source and target after the push.

    Stores the result in pushop.commonheads.
    """
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
328
328
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # NOTE(review): the cheads computation below duplicates
    # _pushcomputecommonheads and overwrites the value read above —
    # presumably left over from extracting that helper; confirm before
    # removing.
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
408
408
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
422
422
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
439
439
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    # only bookmarks that advanced on our side are pushed; when revs were
    # given, restrict to bookmarks pointing into the pushed subset
    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
459
459
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction (remote URL embedded for the
        # transaction journal; password stripped from the URL)
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created (lazily, see gettransaction)
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # set of steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
526
526
def pull(repo, remote, heads=None, force=False):
    """pull changesets (and phases/obsmarkers) from ``remote`` into ``repo``

    Returns the changegroup result code (used as exit status by callers).
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # local peer: make sure we understand every requirement of the
        # source repository before touching anything
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        # bundle2 path is experimental, gated behind a config knob and the
        # remote advertising the 'bundle2-exp' capability
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # run whatever steps bundle2 (if used) did not already handle
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        # release is a no-op on a closed transaction; aborts it otherwise
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
555
555
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(),
        pullop.remote,
        heads=pullop.heads,
        force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
566
566
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    # advertise our own bundle2 capabilities to the remote, URL-quoted
    # inside the legacy bundlecaps argument
    kwargs = {'bundlecaps': set(['HG2X'])}
    capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
    kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    # let extensions add extra arguments to the getbundle request
    _pullbundle2extraprepare(pullop, kwargs)
    # NOTE(review): this check only fires if an extension rewrote kwargs
    # down to just a 'format' key — confirm against extension usage
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.UnknownPartError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # exactly one changegroup part is expected in the reply
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']
597
597
598 def _pullbundle2extraprepare(pullop, kwargs):
598 def _pullbundle2extraprepare(pullop, kwargs):
599 """hook function so that extensions can extend the getbundle call"""
599 """hook function so that extensions can extend the getbundle call"""
600 pass
600 pass
601
601
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the richest protocol command the remote supports:
    # getbundle > changegroup(subset), with an error for partial pulls
    # from peers that cannot serve subsets
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
633
633
def _pullphase(pullop):
    """synchronize phase information with the remote after a pull"""
    # Get remote phases data from remote
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
652
652
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is the first chunk of marker data; its presence means the
        # remote actually has markers to transfer
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            # new markers may change which changesets are hidden/obsolete
            pullop.repo.invalidatevolatilesets()
    return tr
674
674
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build changegroup bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    # without the HG2X capability the caller only understands a plain HG10
    # changegroup stream
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    # decode the client's bundle2 capabilities from the bundlecaps blob
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
        bundler.addpart(part)
    # let extensions append additional parts to the bundle
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
709
709
710 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
710 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
711 bundlecaps=None, **kwargs):
711 bundlecaps=None, **kwargs):
712 """hook function to let extensions add parts to the requested bundle"""
712 """hook function to let extensions add parts to the requested bundle"""
713 pass
713 pass
714
714
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current_heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(current_heads))).digest()
    # 'force' skips the race check entirely; otherwise the client must have
    # sent either the exact head list or a hash of it
    if (their_heads == ['force']
        or their_heads == current_heads
        or their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
728
728
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: process inside a single transaction so hooks
            # can see (and veto) the pending changes before commit
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                # expose pending (not yet committed) changelog data to the
                # pre-close hook via the HG_PENDING mechanism
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # tag the exception so callers know it happened during
                # bundle2 processing (error reporting differs)
                exc.duringunbundle2 = True
                raise
        else:
            # plain HG10 changegroup
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now