##// END OF EJS Templates
exchange: remove duplicated addition to pushop.stepsdone...
Pierre-Yves David -
r22244:172036d6 default
parent child Browse files
Show More
@@ -1,1030 +1,1029 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff a bundle's 4-byte header and return the matching unbundler.

    ``fh`` is the open file object to read from, ``fname`` a name used only
    in error messages (``"stream"`` when absent).  When ``vfs`` is given and
    a real ``fname`` is supplied, the name is resolved through the vfs for
    nicer error reporting.  Raises util.Abort for non-bundle or unknown
    version data.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A raw, headerless changegroup stream starts with a NUL chunk
        # length; re-wrap it so it parses as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 carries a 2-byte compression code unless we forced 'UN' above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.ret:
            return self.futureheads
        else:
            return self.fallbackheads
133
133
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # pushing to a local peer: make sure it understands all of our
        # repository requirements before doing any work
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style peer: we must hold the remote lock ourselves for
            # the duration of the push
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            # bundle2 path is experimental and gated on both a local config
            # knob and the remote advertising the capability
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # each of these steps checks pushop.stepsdone and is a no-op if
            # the bundle2 exchange already performed it
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop.ret
201
201
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
209
209
def pushdiscovery(stepname):
    """Decorator registering a discovery function run before push.

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list, so registration order is
    execution order (this may matter).

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, modify the pushdiscovery dictionary directly."""
    def register(func):
        # a step name must be registered at most once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
225
225
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
231
231
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming`` from common/outgoing discovery against the remote.
    """
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
245
245
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a publishing server everything is public, so only restrict the
    # revset to public() when the remote is non-publishing
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
280
280
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    # schedule the repository's obsstore as the outgoing obsolescence
    # markers (no per-changeset filtering at this point)
    pushop.outobsmarkers = pushop.repo.obsstore
284
284
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmarks that need to be pushed

    Appends ``(name, remote-node, local-node)`` tuples to
    ``pushop.outbookmarks`` for bookmarks the remote can fast-forward.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark updates to the pushed subset of the repo
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    comp = bookmarks.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
    # only push bookmarks whose local side advanced (advsrc) and whose
    # target changeset is actually part of what we push
    for b, scid, dcid in advsrc:
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
302
302
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before they are bundled.

    Returns False when there is nothing to push.  Unless ``--force`` was
    given, aborts when the push would publish obsolete/troubled changesets
    or create unexpected new remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
341
341
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
349
349
def b2partsgenerator(stepname):
    """Decorator registering a function that generates a bundle2 part.

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list, so registration order is
    generation order (this may matter).

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, modify the b2partsgenmapping dictionary directly."""
    def register(func):
        # a step name must be registered at most once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
365
365
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    # mark the step done exactly once (the previous revision of this code
    # added it a second time a few lines below, which was redundant)
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply
392
391
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one ``b2x:pushkey`` part per head that must be turned public, and
    returns a reply handler that checks the server honored each update.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiom fix: use "x not in y" (matches _pushb2bookmarks below)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
424
423
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one ``b2x:pushkey`` part per outgoing bookmark, and returns a
    reply handler that reports the outcome of each update to the user.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        part2book.append((part.id, book))
    def handlereply(op):
        for partid, book in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    pushop.ui.status(_("updating bookmark %s\n") % book)
                else:
                    pushop.ui.warn(_('updating bookmark %s failed!\n') % book)
    return handlereply
457
456
458
457
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    # let each registered generator add its part(s); a callable return
    # value is a handler to run on the server's reply bundle
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
487
489 def _pushchangeset(pushop):
488 def _pushchangeset(pushop):
490 """Make the actual push of changeset bundle to remote repo"""
489 """Make the actual push of changeset bundle to remote repo"""
491 if 'changesets' in pushop.stepsdone:
490 if 'changesets' in pushop.stepsdone:
492 return
491 return
493 pushop.stepsdone.add('changesets')
492 pushop.stepsdone.add('changesets')
494 if not _pushcheckoutgoing(pushop):
493 if not _pushcheckoutgoing(pushop):
495 return
494 return
496 pushop.repo.prepushoutgoinghooks(pushop.repo,
495 pushop.repo.prepushoutgoinghooks(pushop.repo,
497 pushop.remote,
496 pushop.remote,
498 pushop.outgoing)
497 pushop.outgoing)
499 outgoing = pushop.outgoing
498 outgoing = pushop.outgoing
500 unbundle = pushop.remote.capable('unbundle')
499 unbundle = pushop.remote.capable('unbundle')
501 # TODO: get bundlecaps from remote
500 # TODO: get bundlecaps from remote
502 bundlecaps = None
501 bundlecaps = None
503 # create a changegroup from local
502 # create a changegroup from local
504 if pushop.revs is None and not (outgoing.excluded
503 if pushop.revs is None and not (outgoing.excluded
505 or pushop.repo.changelog.filteredrevs):
504 or pushop.repo.changelog.filteredrevs):
506 # push everything,
505 # push everything,
507 # use the fast path, no race possible on push
506 # use the fast path, no race possible on push
508 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
507 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
509 cg = changegroup.getsubset(pushop.repo,
508 cg = changegroup.getsubset(pushop.repo,
510 outgoing,
509 outgoing,
511 bundler,
510 bundler,
512 'push',
511 'push',
513 fastpath=True)
512 fastpath=True)
514 else:
513 else:
515 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
514 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
516 bundlecaps)
515 bundlecaps)
517
516
518 # apply changegroup to remote
517 # apply changegroup to remote
519 if unbundle:
518 if unbundle:
520 # local repo finds heads on server, finds out what
519 # local repo finds heads on server, finds out what
521 # revs it must push. once revs transferred, if server
520 # revs it must push. once revs transferred, if server
522 # finds it has different heads (someone else won
521 # finds it has different heads (someone else won
523 # commit/push race), server aborts.
522 # commit/push race), server aborts.
524 if pushop.force:
523 if pushop.force:
525 remoteheads = ['force']
524 remoteheads = ['force']
526 else:
525 else:
527 remoteheads = pushop.remoteheads
526 remoteheads = pushop.remoteheads
528 # ssh: return remote's addchangegroup()
527 # ssh: return remote's addchangegroup()
529 # http: return remote's addchangegroup() or 0 for error
528 # http: return remote's addchangegroup() or 0 for error
530 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
529 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
531 pushop.repo.url())
530 pushop.repo.url())
532 else:
531 else:
533 # we return an integer indicating remote head count
532 # we return an integer indicating remote head count
534 # change
533 # change
535 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
534 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
536
535
537 def _pushsyncphase(pushop):
536 def _pushsyncphase(pushop):
538 """synchronise phase information locally and remotely"""
537 """synchronise phase information locally and remotely"""
539 cheads = pushop.commonheads
538 cheads = pushop.commonheads
540 # even when we don't push, exchanging phase data is useful
539 # even when we don't push, exchanging phase data is useful
541 remotephases = pushop.remote.listkeys('phases')
540 remotephases = pushop.remote.listkeys('phases')
542 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
541 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
543 and remotephases # server supports phases
542 and remotephases # server supports phases
544 and pushop.ret is None # nothing was pushed
543 and pushop.ret is None # nothing was pushed
545 and remotephases.get('publishing', False)):
544 and remotephases.get('publishing', False)):
546 # When:
545 # When:
547 # - this is a subrepo push
546 # - this is a subrepo push
548 # - and remote support phase
547 # - and remote support phase
549 # - and no changeset was pushed
548 # - and no changeset was pushed
550 # - and remote is publishing
549 # - and remote is publishing
551 # We may be in issue 3871 case!
550 # We may be in issue 3871 case!
552 # We drop the possible phase synchronisation done by
551 # We drop the possible phase synchronisation done by
553 # courtesy to publish changesets possibly locally draft
552 # courtesy to publish changesets possibly locally draft
554 # on the remote.
553 # on the remote.
555 remotephases = {'publishing': 'True'}
554 remotephases = {'publishing': 'True'}
556 if not remotephases: # old server or public only reply from non-publishing
555 if not remotephases: # old server or public only reply from non-publishing
557 _localphasemove(pushop, cheads)
556 _localphasemove(pushop, cheads)
558 # don't push any phase data as there is nothing to push
557 # don't push any phase data as there is nothing to push
559 else:
558 else:
560 ana = phases.analyzeremotephases(pushop.repo, cheads,
559 ana = phases.analyzeremotephases(pushop.repo, cheads,
561 remotephases)
560 remotephases)
562 pheads, droots = ana
561 pheads, droots = ana
563 ### Apply remote phase on local
562 ### Apply remote phase on local
564 if remotephases.get('publishing', False):
563 if remotephases.get('publishing', False):
565 _localphasemove(pushop, cheads)
564 _localphasemove(pushop, cheads)
566 else: # publish = False
565 else: # publish = False
567 _localphasemove(pushop, pheads)
566 _localphasemove(pushop, pheads)
568 _localphasemove(pushop, cheads, phases.draft)
567 _localphasemove(pushop, cheads, phases.draft)
569 ### Apply local phase on remote
568 ### Apply local phase on remote
570
569
571 if pushop.ret:
570 if pushop.ret:
572 if 'phases' in pushop.stepsdone:
571 if 'phases' in pushop.stepsdone:
573 # phases already pushed though bundle2
572 # phases already pushed though bundle2
574 return
573 return
575 outdated = pushop.outdatedphases
574 outdated = pushop.outdatedphases
576 else:
575 else:
577 outdated = pushop.fallbackoutdatedphases
576 outdated = pushop.fallbackoutdatedphases
578
577
579 pushop.stepsdone.add('phases')
578 pushop.stepsdone.add('phases')
580
579
581 # filter heads already turned public by the push
580 # filter heads already turned public by the push
582 outdated = [c for c in outdated if c.node() not in pheads]
581 outdated = [c for c in outdated if c.node() not in pheads]
583 b2caps = bundle2.bundle2caps(pushop.remote)
582 b2caps = bundle2.bundle2caps(pushop.remote)
584 if 'b2x:pushkey' in b2caps:
583 if 'b2x:pushkey' in b2caps:
585 # server supports bundle2, let's do a batched push through it
584 # server supports bundle2, let's do a batched push through it
586 #
585 #
587 # This will eventually be unified with the changesets bundle2 push
586 # This will eventually be unified with the changesets bundle2 push
588 bundler = bundle2.bundle20(pushop.ui, b2caps)
587 bundler = bundle2.bundle20(pushop.ui, b2caps)
589 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
588 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
590 bundler.newpart('b2x:replycaps', data=capsblob)
589 bundler.newpart('b2x:replycaps', data=capsblob)
591 part2node = []
590 part2node = []
592 enc = pushkey.encode
591 enc = pushkey.encode
593 for newremotehead in outdated:
592 for newremotehead in outdated:
594 part = bundler.newpart('b2x:pushkey')
593 part = bundler.newpart('b2x:pushkey')
595 part.addparam('namespace', enc('phases'))
594 part.addparam('namespace', enc('phases'))
596 part.addparam('key', enc(newremotehead.hex()))
595 part.addparam('key', enc(newremotehead.hex()))
597 part.addparam('old', enc(str(phases.draft)))
596 part.addparam('old', enc(str(phases.draft)))
598 part.addparam('new', enc(str(phases.public)))
597 part.addparam('new', enc(str(phases.public)))
599 part2node.append((part.id, newremotehead))
598 part2node.append((part.id, newremotehead))
600 stream = util.chunkbuffer(bundler.getchunks())
599 stream = util.chunkbuffer(bundler.getchunks())
601 try:
600 try:
602 reply = pushop.remote.unbundle(stream, ['force'], 'push')
601 reply = pushop.remote.unbundle(stream, ['force'], 'push')
603 op = bundle2.processbundle(pushop.repo, reply)
602 op = bundle2.processbundle(pushop.repo, reply)
604 except error.BundleValueError, exc:
603 except error.BundleValueError, exc:
605 raise util.Abort('missing support for %s' % exc)
604 raise util.Abort('missing support for %s' % exc)
606 for partid, node in part2node:
605 for partid, node in part2node:
607 partrep = op.records.getreplies(partid)
606 partrep = op.records.getreplies(partid)
608 results = partrep['pushkey']
607 results = partrep['pushkey']
609 assert len(results) <= 1
608 assert len(results) <= 1
610 msg = None
609 msg = None
611 if not results:
610 if not results:
612 msg = _('server ignored update of %s to public!\n') % node
611 msg = _('server ignored update of %s to public!\n') % node
613 elif not int(results[0]['return']):
612 elif not int(results[0]['return']):
614 msg = _('updating %s to public failed!\n') % node
613 msg = _('updating %s to public failed!\n') % node
615 if msg is not None:
614 if msg is not None:
616 pushop.ui.warn(msg)
615 pushop.ui.warn(msg)
617
616
618 else:
617 else:
619 # fallback to independant pushkey command
618 # fallback to independant pushkey command
620 for newremotehead in outdated:
619 for newremotehead in outdated:
621 r = pushop.remote.pushkey('phases',
620 r = pushop.remote.pushkey('phases',
622 newremotehead.hex(),
621 newremotehead.hex(),
623 str(phases.draft),
622 str(phases.draft),
624 str(phases.public))
623 str(phases.public))
625 if not r:
624 if not r:
626 pushop.ui.warn(_('updating %s to public failed!\n')
625 pushop.ui.warn(_('updating %s to public failed!\n')
627 % newremotehead)
626 % newremotehead)
628
627
629 def _localphasemove(pushop, nodes, phase=phases.public):
628 def _localphasemove(pushop, nodes, phase=phases.public):
630 """move <nodes> to <phase> in the local source repo"""
629 """move <nodes> to <phase> in the local source repo"""
631 if pushop.locallocked:
630 if pushop.locallocked:
632 tr = pushop.repo.transaction('push-phase-sync')
631 tr = pushop.repo.transaction('push-phase-sync')
633 try:
632 try:
634 phases.advanceboundary(pushop.repo, tr, phase, nodes)
633 phases.advanceboundary(pushop.repo, tr, phase, nodes)
635 tr.close()
634 tr.close()
636 finally:
635 finally:
637 tr.release()
636 tr.release()
638 else:
637 else:
639 # repo is not locked, do not change any phases!
638 # repo is not locked, do not change any phases!
640 # Informs the user that phases should have been moved when
639 # Informs the user that phases should have been moved when
641 # applicable.
640 # applicable.
642 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
641 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
643 phasestr = phases.phasenames[phase]
642 phasestr = phases.phasenames[phase]
644 if actualmoves:
643 if actualmoves:
645 pushop.ui.status(_('cannot lock source repo, skipping '
644 pushop.ui.status(_('cannot lock source repo, skipping '
646 'local %s phase update\n') % phasestr)
645 'local %s phase update\n') % phasestr)
647
646
648 def _pushobsolete(pushop):
647 def _pushobsolete(pushop):
649 """utility function to push obsolete markers to a remote"""
648 """utility function to push obsolete markers to a remote"""
650 if 'obsmarkers' in pushop.stepsdone:
649 if 'obsmarkers' in pushop.stepsdone:
651 return
650 return
652 pushop.ui.debug('try to push obsolete markers to remote\n')
651 pushop.ui.debug('try to push obsolete markers to remote\n')
653 repo = pushop.repo
652 repo = pushop.repo
654 remote = pushop.remote
653 remote = pushop.remote
655 pushop.stepsdone.add('obsmarkers')
654 pushop.stepsdone.add('obsmarkers')
656 if (obsolete._enabled and repo.obsstore and
655 if (obsolete._enabled and repo.obsstore and
657 'obsolete' in remote.listkeys('namespaces')):
656 'obsolete' in remote.listkeys('namespaces')):
658 rslts = []
657 rslts = []
659 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
658 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
660 for key in sorted(remotedata, reverse=True):
659 for key in sorted(remotedata, reverse=True):
661 # reverse sort to ensure we end with dump0
660 # reverse sort to ensure we end with dump0
662 data = remotedata[key]
661 data = remotedata[key]
663 rslts.append(remote.pushkey('obsolete', key, '', data))
662 rslts.append(remote.pushkey('obsolete', key, '', data))
664 if [r for r in rslts if not r]:
663 if [r for r in rslts if not r]:
665 msg = _('failed to push some obsolete markers!\n')
664 msg = _('failed to push some obsolete markers!\n')
666 repo.ui.warn(msg)
665 repo.ui.warn(msg)
667
666
668 def _pushbookmark(pushop):
667 def _pushbookmark(pushop):
669 """Update bookmark position on remote"""
668 """Update bookmark position on remote"""
670 if pushop.ret == 0 or 'bookmarks' in pushop.stepsdone:
669 if pushop.ret == 0 or 'bookmarks' in pushop.stepsdone:
671 return
670 return
672 pushop.stepsdone.add('bookmarks')
671 pushop.stepsdone.add('bookmarks')
673 ui = pushop.ui
672 ui = pushop.ui
674 remote = pushop.remote
673 remote = pushop.remote
675 for b, old, new in pushop.outbookmarks:
674 for b, old, new in pushop.outbookmarks:
676 if remote.pushkey('bookmarks', b, old, new):
675 if remote.pushkey('bookmarks', b, old, new):
677 ui.status(_("updating bookmark %s\n") % b)
676 ui.status(_("updating bookmark %s\n") % b)
678 else:
677 else:
679 ui.warn(_('updating bookmark %s failed!\n') % b)
678 ui.warn(_('updating bookmark %s failed!\n') % b)
680
679
681 class pulloperation(object):
680 class pulloperation(object):
682 """A object that represent a single pull operation
681 """A object that represent a single pull operation
683
682
684 It purpose is to carry push related state and very common operation.
683 It purpose is to carry push related state and very common operation.
685
684
686 A new should be created at the beginning of each pull and discarded
685 A new should be created at the beginning of each pull and discarded
687 afterward.
686 afterward.
688 """
687 """
689
688
690 def __init__(self, repo, remote, heads=None, force=False):
689 def __init__(self, repo, remote, heads=None, force=False):
691 # repo we pull into
690 # repo we pull into
692 self.repo = repo
691 self.repo = repo
693 # repo we pull from
692 # repo we pull from
694 self.remote = remote
693 self.remote = remote
695 # revision we try to pull (None is "all")
694 # revision we try to pull (None is "all")
696 self.heads = heads
695 self.heads = heads
697 # do we force pull?
696 # do we force pull?
698 self.force = force
697 self.force = force
699 # the name the pull transaction
698 # the name the pull transaction
700 self._trname = 'pull\n' + util.hidepassword(remote.url())
699 self._trname = 'pull\n' + util.hidepassword(remote.url())
701 # hold the transaction once created
700 # hold the transaction once created
702 self._tr = None
701 self._tr = None
703 # set of common changeset between local and remote before pull
702 # set of common changeset between local and remote before pull
704 self.common = None
703 self.common = None
705 # set of pulled head
704 # set of pulled head
706 self.rheads = None
705 self.rheads = None
707 # list of missing changeset to fetch remotely
706 # list of missing changeset to fetch remotely
708 self.fetch = None
707 self.fetch = None
709 # result of changegroup pulling (used as return code by pull)
708 # result of changegroup pulling (used as return code by pull)
710 self.cgresult = None
709 self.cgresult = None
711 # list of step remaining todo (related to future bundle2 usage)
710 # list of step remaining todo (related to future bundle2 usage)
712 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
711 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
713
712
714 @util.propertycache
713 @util.propertycache
715 def pulledsubset(self):
714 def pulledsubset(self):
716 """heads of the set of changeset target by the pull"""
715 """heads of the set of changeset target by the pull"""
717 # compute target subset
716 # compute target subset
718 if self.heads is None:
717 if self.heads is None:
719 # We pulled every thing possible
718 # We pulled every thing possible
720 # sync on everything common
719 # sync on everything common
721 c = set(self.common)
720 c = set(self.common)
722 ret = list(self.common)
721 ret = list(self.common)
723 for n in self.rheads:
722 for n in self.rheads:
724 if n not in c:
723 if n not in c:
725 ret.append(n)
724 ret.append(n)
726 return ret
725 return ret
727 else:
726 else:
728 # We pulled a specific subset
727 # We pulled a specific subset
729 # sync on this subset
728 # sync on this subset
730 return self.heads
729 return self.heads
731
730
732 def gettransaction(self):
731 def gettransaction(self):
733 """get appropriate pull transaction, creating it if needed"""
732 """get appropriate pull transaction, creating it if needed"""
734 if self._tr is None:
733 if self._tr is None:
735 self._tr = self.repo.transaction(self._trname)
734 self._tr = self.repo.transaction(self._trname)
736 return self._tr
735 return self._tr
737
736
738 def closetransaction(self):
737 def closetransaction(self):
739 """close transaction if created"""
738 """close transaction if created"""
740 if self._tr is not None:
739 if self._tr is not None:
741 self._tr.close()
740 self._tr.close()
742
741
743 def releasetransaction(self):
742 def releasetransaction(self):
744 """release transaction if created"""
743 """release transaction if created"""
745 if self._tr is not None:
744 if self._tr is not None:
746 self._tr.release()
745 self._tr.release()
747
746
748 def pull(repo, remote, heads=None, force=False):
747 def pull(repo, remote, heads=None, force=False):
749 pullop = pulloperation(repo, remote, heads, force)
748 pullop = pulloperation(repo, remote, heads, force)
750 if pullop.remote.local():
749 if pullop.remote.local():
751 missing = set(pullop.remote.requirements) - pullop.repo.supported
750 missing = set(pullop.remote.requirements) - pullop.repo.supported
752 if missing:
751 if missing:
753 msg = _("required features are not"
752 msg = _("required features are not"
754 " supported in the destination:"
753 " supported in the destination:"
755 " %s") % (', '.join(sorted(missing)))
754 " %s") % (', '.join(sorted(missing)))
756 raise util.Abort(msg)
755 raise util.Abort(msg)
757
756
758 lock = pullop.repo.lock()
757 lock = pullop.repo.lock()
759 try:
758 try:
760 _pulldiscovery(pullop)
759 _pulldiscovery(pullop)
761 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
760 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
762 and pullop.remote.capable('bundle2-exp')):
761 and pullop.remote.capable('bundle2-exp')):
763 _pullbundle2(pullop)
762 _pullbundle2(pullop)
764 if 'changegroup' in pullop.todosteps:
763 if 'changegroup' in pullop.todosteps:
765 _pullchangeset(pullop)
764 _pullchangeset(pullop)
766 if 'phases' in pullop.todosteps:
765 if 'phases' in pullop.todosteps:
767 _pullphase(pullop)
766 _pullphase(pullop)
768 if 'obsmarkers' in pullop.todosteps:
767 if 'obsmarkers' in pullop.todosteps:
769 _pullobsolete(pullop)
768 _pullobsolete(pullop)
770 pullop.closetransaction()
769 pullop.closetransaction()
771 finally:
770 finally:
772 pullop.releasetransaction()
771 pullop.releasetransaction()
773 lock.release()
772 lock.release()
774
773
775 return pullop.cgresult
774 return pullop.cgresult
776
775
777 def _pulldiscovery(pullop):
776 def _pulldiscovery(pullop):
778 """discovery phase for the pull
777 """discovery phase for the pull
779
778
780 Current handle changeset discovery only, will change handle all discovery
779 Current handle changeset discovery only, will change handle all discovery
781 at some point."""
780 at some point."""
782 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
781 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
783 pullop.remote,
782 pullop.remote,
784 heads=pullop.heads,
783 heads=pullop.heads,
785 force=pullop.force)
784 force=pullop.force)
786 pullop.common, pullop.fetch, pullop.rheads = tmp
785 pullop.common, pullop.fetch, pullop.rheads = tmp
787
786
788 def _pullbundle2(pullop):
787 def _pullbundle2(pullop):
789 """pull data using bundle2
788 """pull data using bundle2
790
789
791 For now, the only supported data are changegroup."""
790 For now, the only supported data are changegroup."""
792 remotecaps = bundle2.bundle2caps(pullop.remote)
791 remotecaps = bundle2.bundle2caps(pullop.remote)
793 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
792 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
794 # pulling changegroup
793 # pulling changegroup
795 pullop.todosteps.remove('changegroup')
794 pullop.todosteps.remove('changegroup')
796
795
797 kwargs['common'] = pullop.common
796 kwargs['common'] = pullop.common
798 kwargs['heads'] = pullop.heads or pullop.rheads
797 kwargs['heads'] = pullop.heads or pullop.rheads
799 if 'b2x:listkeys' in remotecaps:
798 if 'b2x:listkeys' in remotecaps:
800 kwargs['listkeys'] = ['phase']
799 kwargs['listkeys'] = ['phase']
801 if not pullop.fetch:
800 if not pullop.fetch:
802 pullop.repo.ui.status(_("no changes found\n"))
801 pullop.repo.ui.status(_("no changes found\n"))
803 pullop.cgresult = 0
802 pullop.cgresult = 0
804 else:
803 else:
805 if pullop.heads is None and list(pullop.common) == [nullid]:
804 if pullop.heads is None and list(pullop.common) == [nullid]:
806 pullop.repo.ui.status(_("requesting all changes\n"))
805 pullop.repo.ui.status(_("requesting all changes\n"))
807 _pullbundle2extraprepare(pullop, kwargs)
806 _pullbundle2extraprepare(pullop, kwargs)
808 if kwargs.keys() == ['format']:
807 if kwargs.keys() == ['format']:
809 return # nothing to pull
808 return # nothing to pull
810 bundle = pullop.remote.getbundle('pull', **kwargs)
809 bundle = pullop.remote.getbundle('pull', **kwargs)
811 try:
810 try:
812 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
811 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
813 except error.BundleValueError, exc:
812 except error.BundleValueError, exc:
814 raise util.Abort('missing support for %s' % exc)
813 raise util.Abort('missing support for %s' % exc)
815
814
816 if pullop.fetch:
815 if pullop.fetch:
817 assert len(op.records['changegroup']) == 1
816 assert len(op.records['changegroup']) == 1
818 pullop.cgresult = op.records['changegroup'][0]['return']
817 pullop.cgresult = op.records['changegroup'][0]['return']
819
818
820 # processing phases change
819 # processing phases change
821 for namespace, value in op.records['listkeys']:
820 for namespace, value in op.records['listkeys']:
822 if namespace == 'phases':
821 if namespace == 'phases':
823 _pullapplyphases(pullop, value)
822 _pullapplyphases(pullop, value)
824
823
825 def _pullbundle2extraprepare(pullop, kwargs):
824 def _pullbundle2extraprepare(pullop, kwargs):
826 """hook function so that extensions can extend the getbundle call"""
825 """hook function so that extensions can extend the getbundle call"""
827 pass
826 pass
828
827
829 def _pullchangeset(pullop):
828 def _pullchangeset(pullop):
830 """pull changeset from unbundle into the local repo"""
829 """pull changeset from unbundle into the local repo"""
831 # We delay the open of the transaction as late as possible so we
830 # We delay the open of the transaction as late as possible so we
832 # don't open transaction for nothing or you break future useful
831 # don't open transaction for nothing or you break future useful
833 # rollback call
832 # rollback call
834 pullop.todosteps.remove('changegroup')
833 pullop.todosteps.remove('changegroup')
835 if not pullop.fetch:
834 if not pullop.fetch:
836 pullop.repo.ui.status(_("no changes found\n"))
835 pullop.repo.ui.status(_("no changes found\n"))
837 pullop.cgresult = 0
836 pullop.cgresult = 0
838 return
837 return
839 pullop.gettransaction()
838 pullop.gettransaction()
840 if pullop.heads is None and list(pullop.common) == [nullid]:
839 if pullop.heads is None and list(pullop.common) == [nullid]:
841 pullop.repo.ui.status(_("requesting all changes\n"))
840 pullop.repo.ui.status(_("requesting all changes\n"))
842 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
841 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
843 # issue1320, avoid a race if remote changed after discovery
842 # issue1320, avoid a race if remote changed after discovery
844 pullop.heads = pullop.rheads
843 pullop.heads = pullop.rheads
845
844
846 if pullop.remote.capable('getbundle'):
845 if pullop.remote.capable('getbundle'):
847 # TODO: get bundlecaps from remote
846 # TODO: get bundlecaps from remote
848 cg = pullop.remote.getbundle('pull', common=pullop.common,
847 cg = pullop.remote.getbundle('pull', common=pullop.common,
849 heads=pullop.heads or pullop.rheads)
848 heads=pullop.heads or pullop.rheads)
850 elif pullop.heads is None:
849 elif pullop.heads is None:
851 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
850 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
852 elif not pullop.remote.capable('changegroupsubset'):
851 elif not pullop.remote.capable('changegroupsubset'):
853 raise util.Abort(_("partial pull cannot be done because "
852 raise util.Abort(_("partial pull cannot be done because "
854 "other repository doesn't support "
853 "other repository doesn't support "
855 "changegroupsubset."))
854 "changegroupsubset."))
856 else:
855 else:
857 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
856 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
858 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
857 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
859 pullop.remote.url())
858 pullop.remote.url())
860
859
861 def _pullphase(pullop):
860 def _pullphase(pullop):
862 # Get remote phases data from remote
861 # Get remote phases data from remote
863 remotephases = pullop.remote.listkeys('phases')
862 remotephases = pullop.remote.listkeys('phases')
864 _pullapplyphases(pullop, remotephases)
863 _pullapplyphases(pullop, remotephases)
865
864
866 def _pullapplyphases(pullop, remotephases):
865 def _pullapplyphases(pullop, remotephases):
867 """apply phase movement from observed remote state"""
866 """apply phase movement from observed remote state"""
868 pullop.todosteps.remove('phases')
867 pullop.todosteps.remove('phases')
869 publishing = bool(remotephases.get('publishing', False))
868 publishing = bool(remotephases.get('publishing', False))
870 if remotephases and not publishing:
869 if remotephases and not publishing:
871 # remote is new and unpublishing
870 # remote is new and unpublishing
872 pheads, _dr = phases.analyzeremotephases(pullop.repo,
871 pheads, _dr = phases.analyzeremotephases(pullop.repo,
873 pullop.pulledsubset,
872 pullop.pulledsubset,
874 remotephases)
873 remotephases)
875 dheads = pullop.pulledsubset
874 dheads = pullop.pulledsubset
876 else:
875 else:
877 # Remote is old or publishing all common changesets
876 # Remote is old or publishing all common changesets
878 # should be seen as public
877 # should be seen as public
879 pheads = pullop.pulledsubset
878 pheads = pullop.pulledsubset
880 dheads = []
879 dheads = []
881 unfi = pullop.repo.unfiltered()
880 unfi = pullop.repo.unfiltered()
882 phase = unfi._phasecache.phase
881 phase = unfi._phasecache.phase
883 rev = unfi.changelog.nodemap.get
882 rev = unfi.changelog.nodemap.get
884 public = phases.public
883 public = phases.public
885 draft = phases.draft
884 draft = phases.draft
886
885
887 # exclude changesets already public locally and update the others
886 # exclude changesets already public locally and update the others
888 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
887 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
889 if pheads:
888 if pheads:
890 tr = pullop.gettransaction()
889 tr = pullop.gettransaction()
891 phases.advanceboundary(pullop.repo, tr, public, pheads)
890 phases.advanceboundary(pullop.repo, tr, public, pheads)
892
891
893 # exclude changesets already draft locally and update the others
892 # exclude changesets already draft locally and update the others
894 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
893 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
895 if dheads:
894 if dheads:
896 tr = pullop.gettransaction()
895 tr = pullop.gettransaction()
897 phases.advanceboundary(pullop.repo, tr, draft, dheads)
896 phases.advanceboundary(pullop.repo, tr, draft, dheads)
898
897
def _pullobsolete(pullop):
    """Fetch obsolescence markers from the remote and merge them locally.

    Marks the 'obsmarkers' step of the pull as done. A pull transaction is
    opened lazily — only when the remote actually advertises marker data
    (a 'dump0' key) — and is returned so the caller knows a transaction now
    exists; otherwise None is returned.

    Exists mostly to allow overriding for experimentation purposes."""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if not obsolete._enabled:
        return tr
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' in remoteobs:
        # there is marker data to merge: make sure a transaction exists
        tr = pullop.gettransaction()
    for key in sorted(remoteobs, reverse=True):
        if key.startswith('dump'):
            blob = base85.b85decode(remoteobs[key])
            pullop.repo.obsstore.mergemarkers(tr, blob)
    pullop.repo.invalidatevolatilesets()
    return tr
920
919
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    # advertise bundle2 support plus the url-quoted capability blob
    return set(['HG2X', 'bundle2=' + urllib.quote(capsblob)])
927
926
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif bundlecaps is None or 'HG2X' not in bundlecaps:
        # A bundle10 (plain changegroup) request that explicitly skips the
        # changegroup makes no sense. Guard against bundlecaps being None so
        # the caller gets the intended ValueError instead of a TypeError from
        # `'HG2X' not in None` (the check below already guards this way).
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # bundle10 case: only a bare changegroup can be returned, so any
        # extra bundle2-only argument is an error
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    # add one listkeys part per requested namespace
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    # let extensions contribute extra parts
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
974
973
975 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
974 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
976 bundlecaps=None, **kwargs):
975 bundlecaps=None, **kwargs):
977 """hook function to let extensions add parts to the requested bundle"""
976 """hook function to let extensions add parts to the requested bundle"""
978 pass
977 pass
979
978
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    headsdigest = util.sha1(''.join(sorted(currentheads))).digest()
    # the remote side may send the literal head list, a hash of it, or the
    # special 'force' marker which bypasses the race check entirely
    unchanged = (their_heads == ['force']
                 or their_heads == currentheads
                 or their_heads == ['hashed', headsdigest])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
993
992
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        # abort early if someone pushed to the repo since `heads` was computed
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # a 'params' attribute marks this as a bundle2 stream, which is
            # processed inside a transaction so hooks can veto it
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                # process the stream; keep the reply bundle to send back
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                # expose pending (not yet committed) changes to the pre-close
                # hook via the 'pending' argument when any were written
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # tag the exception so callers know it happened during
                # bundle2 processing (affects error reporting to the client)
                exc.duringunbundle2 = True
                raise
        else:
            # plain HG10 changegroup: addchangegroup manages its own
            # transaction internally
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            # release() is a no-op if the transaction was closed above;
            # otherwise it rolls back the partially-applied bundle
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now