##// END OF EJS Templates
push: add bookmarks to the unified bundle2 push...
Pierre-Yves David -
r22242:ed222ebd default
parent child Browse files
Show More
@@ -1,996 +1,1030 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the header of ``fh`` and return the matching unbundler.

    ``fname`` is only used in error messages; when empty the data is
    treated as an anonymous stream.  When ``vfs`` is provided, ``fname``
    is resolved relative to it.
    """
    hdr = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        # A headerless stream beginning with a NUL byte is an old-style
        # uncompressed changegroup; re-inject the bytes we already read.
        if not hdr.startswith('HG') and hdr.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, hdr)
            hdr = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = hdr[0:2]
    version = hdr[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # for v1 bundles the compression code follows the magic
            compression = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, compression)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40
40
class pushoperation(object):
    """Carrier for the state of a single push operation.

    An instance should be created at the beginning of each push and
    discarded afterward; it holds push-related state plus a few very
    common derived values.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        self.repo = repo              # repo we push from
        self.ui = repo.ui
        self.remote = remote          # repo we push to
        self.force = force            # force option provided
        self.revs = revs              # revs to be pushed (None is "all")
        self.newbranch = newbranch    # allow push of new branch
        self.locallocked = None       # did a local lock get acquired?
        # steps already performed (used to check what has been handled
        # through bundle2 already)
        self.stepsdone = set()
        # Integer version of the push result:
        #  - None means nothing to push
        #  - 0 means HTTP error
        #  - 1 means we pushed and remote head count is unchanged *or*
        #    we have outgoing changesets but refused to push
        #  - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        self.remoteheads = None       # all remote heads before the push
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        self.outobsmarkers = set()    # outgoing obsmarkers
        self.outbookmarks = []        # outgoing bookmarks

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        commonrevs = set(self.outgoing.common)
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in commonrevs]
        # and
        # * commonheads parents on missing
        parentset = unfi.set('%ln and parents(roots(%ln))',
                             self.outgoing.commonheads,
                             self.outgoing.missing)
        cheads.extend(ctx.node() for ctx in parentset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.ret:
            return self.futureheads
        return self.fallbackheads
133
133
134 def push(repo, remote, force=False, revs=None, newbranch=False):
134 def push(repo, remote, force=False, revs=None, newbranch=False):
135 '''Push outgoing changesets (limited by revs) from a local
135 '''Push outgoing changesets (limited by revs) from a local
136 repository to remote. Return an integer:
136 repository to remote. Return an integer:
137 - None means nothing to push
137 - None means nothing to push
138 - 0 means HTTP error
138 - 0 means HTTP error
139 - 1 means we pushed and remote head count is unchanged *or*
139 - 1 means we pushed and remote head count is unchanged *or*
140 we have outgoing changesets but refused to push
140 we have outgoing changesets but refused to push
141 - other values as described by addchangegroup()
141 - other values as described by addchangegroup()
142 '''
142 '''
143 pushop = pushoperation(repo, remote, force, revs, newbranch)
143 pushop = pushoperation(repo, remote, force, revs, newbranch)
144 if pushop.remote.local():
144 if pushop.remote.local():
145 missing = (set(pushop.repo.requirements)
145 missing = (set(pushop.repo.requirements)
146 - pushop.remote.local().supported)
146 - pushop.remote.local().supported)
147 if missing:
147 if missing:
148 msg = _("required features are not"
148 msg = _("required features are not"
149 " supported in the destination:"
149 " supported in the destination:"
150 " %s") % (', '.join(sorted(missing)))
150 " %s") % (', '.join(sorted(missing)))
151 raise util.Abort(msg)
151 raise util.Abort(msg)
152
152
153 # there are two ways to push to remote repo:
153 # there are two ways to push to remote repo:
154 #
154 #
155 # addchangegroup assumes local user can lock remote
155 # addchangegroup assumes local user can lock remote
156 # repo (local filesystem, old ssh servers).
156 # repo (local filesystem, old ssh servers).
157 #
157 #
158 # unbundle assumes local user cannot lock remote repo (new ssh
158 # unbundle assumes local user cannot lock remote repo (new ssh
159 # servers, http servers).
159 # servers, http servers).
160
160
161 if not pushop.remote.canpush():
161 if not pushop.remote.canpush():
162 raise util.Abort(_("destination does not support push"))
162 raise util.Abort(_("destination does not support push"))
163 # get local lock as we might write phase data
163 # get local lock as we might write phase data
164 locallock = None
164 locallock = None
165 try:
165 try:
166 locallock = pushop.repo.lock()
166 locallock = pushop.repo.lock()
167 pushop.locallocked = True
167 pushop.locallocked = True
168 except IOError, err:
168 except IOError, err:
169 pushop.locallocked = False
169 pushop.locallocked = False
170 if err.errno != errno.EACCES:
170 if err.errno != errno.EACCES:
171 raise
171 raise
172 # source repo cannot be locked.
172 # source repo cannot be locked.
173 # We do not abort the push, but just disable the local phase
173 # We do not abort the push, but just disable the local phase
174 # synchronisation.
174 # synchronisation.
175 msg = 'cannot lock source repository: %s\n' % err
175 msg = 'cannot lock source repository: %s\n' % err
176 pushop.ui.debug(msg)
176 pushop.ui.debug(msg)
177 try:
177 try:
178 pushop.repo.checkpush(pushop)
178 pushop.repo.checkpush(pushop)
179 lock = None
179 lock = None
180 unbundle = pushop.remote.capable('unbundle')
180 unbundle = pushop.remote.capable('unbundle')
181 if not unbundle:
181 if not unbundle:
182 lock = pushop.remote.lock()
182 lock = pushop.remote.lock()
183 try:
183 try:
184 _pushdiscovery(pushop)
184 _pushdiscovery(pushop)
185 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
185 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
186 False)
186 False)
187 and pushop.remote.capable('bundle2-exp')):
187 and pushop.remote.capable('bundle2-exp')):
188 _pushbundle2(pushop)
188 _pushbundle2(pushop)
189 _pushchangeset(pushop)
189 _pushchangeset(pushop)
190 _pushsyncphase(pushop)
190 _pushsyncphase(pushop)
191 _pushobsolete(pushop)
191 _pushobsolete(pushop)
192 _pushbookmark(pushop)
192 _pushbookmark(pushop)
193 finally:
193 finally:
194 if lock is not None:
194 if lock is not None:
195 lock.release()
195 lock.release()
196 finally:
196 finally:
197 if locallock is not None:
197 if locallock is not None:
198 locallock.release()
198 locallock.release()
199
199
200 return pushop.ret
200 return pushop.ret
201
201
# Ordered list of step names whose discovery runs before a push.
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
209
209
def pushdiscovery(stepname):
    """Decorator registering a function as a pre-push discovery step.

    The decorated function is recorded in the step -> function mapping
    and its name is appended to the ordered list of steps, so the order
    in which functions are decorated may matter.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, modify the pushdiscoverymapping dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
225
225
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
231
231
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Discover the changesets that need to be pushed.

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming``."""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
245
245
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # on a non-publishing server, only already-public changesets
        # need a phase update
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to push if the changeset push succeeds / fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
280
280
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    # Expose the whole local obsstore as outgoing markers; any finer
    # selection is left to the actual push step.
    pushop.outobsmarkers = pushop.repo.obsstore
284
284
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Find local bookmarks that advance their remote counterpart.

    Matching (bookmark, old remote node, new local node) triples are
    appended to ``pushop.outbookmarks``."""
    repo = pushop.repo.unfiltered()
    pushop.ui.debug("checking for updated bookmarks\n")
    # When only a subset of revs is pushed, restrict to bookmarks placed
    # on their ancestors; an empty tuple means no restriction.
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = pushop.remote.listkeys('bookmarks')

    comp = bookmarks.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
    for book, scid, dcid in advsrc:
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((book, dcid, scid))
302
302
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets and decide whether to push.

    Returns False when there is nothing to push.  Unless --force was
    given, aborts if the push would propagate obsolete or troubled
    changesets, or would create unwanted new remote heads (delegated to
    discovery.checkheads)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
341
341
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
349
349
def b2partsgenerator(stepname):
    """Decorator registering a function generating a bundle2 part.

    The decorated function is recorded in the step -> function mapping
    and its name is appended to the ordered list of steps, so the order
    in which functions are decorated may matter.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, modify the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
365
365
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    # Mark the step done exactly once (the original code added it twice).
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        # nothing to push (or push refused)
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply
392
392
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        # remote cannot process pushkey parts; leave phases to the
        # old-style sync
        return
    pushop.stepsdone.add('phases')
    partid2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        # one pushkey part per head turning public
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        partid2node.append((part.id, newremotehead))
    def handlereply(op):
        # report any phase update the server refused or ignored
        for partid, node in partid2node:
            results = op.records.getreplies(partid)['pushkey']
            assert len(results) <= 1
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            else:
                msg = None
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
424
424
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        # remote cannot process pushkey parts; leave bookmarks to the
        # old-style sync
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        # one pushkey part per bookmark update
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        part2book.append((part.id, book))
    def handlereply(op):
        # report the outcome of each bookmark update from the server reply
        for partid, book in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    pushop.ui.status(_("updating bookmark %s\n") % book)
                else:
                    pushop.ui.warn(_('updating bookmark %s failed!\n') % book)
    return handlereply
457
458
425 def _pushbundle2(pushop):
459 def _pushbundle2(pushop):
426 """push data to the remote using bundle2
460 """push data to the remote using bundle2
427
461
428 The only currently supported type of data is changegroup but this will
462 The only currently supported type of data is changegroup but this will
429 evolve in the future."""
463 evolve in the future."""
430 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
464 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
431 # create reply capability
465 # create reply capability
432 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
466 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
433 bundler.newpart('b2x:replycaps', data=capsblob)
467 bundler.newpart('b2x:replycaps', data=capsblob)
434 replyhandlers = []
468 replyhandlers = []
435 for partgenname in b2partsgenorder:
469 for partgenname in b2partsgenorder:
436 partgen = b2partsgenmapping[partgenname]
470 partgen = b2partsgenmapping[partgenname]
437 ret = partgen(pushop, bundler)
471 ret = partgen(pushop, bundler)
438 if callable(ret):
472 if callable(ret):
439 replyhandlers.append(ret)
473 replyhandlers.append(ret)
440 # do not push if nothing to push
474 # do not push if nothing to push
441 if bundler.nbparts <= 1:
475 if bundler.nbparts <= 1:
442 return
476 return
443 stream = util.chunkbuffer(bundler.getchunks())
477 stream = util.chunkbuffer(bundler.getchunks())
444 try:
478 try:
445 reply = pushop.remote.unbundle(stream, ['force'], 'push')
479 reply = pushop.remote.unbundle(stream, ['force'], 'push')
446 except error.BundleValueError, exc:
480 except error.BundleValueError, exc:
447 raise util.Abort('missing support for %s' % exc)
481 raise util.Abort('missing support for %s' % exc)
448 try:
482 try:
449 op = bundle2.processbundle(pushop.repo, reply)
483 op = bundle2.processbundle(pushop.repo, reply)
450 except error.BundleValueError, exc:
484 except error.BundleValueError, exc:
451 raise util.Abort('missing support for %s' % exc)
485 raise util.Abort('missing support for %s' % exc)
452 for rephand in replyhandlers:
486 for rephand in replyhandlers:
453 rephand(op)
487 rephand(op)
454
488
455 def _pushchangeset(pushop):
489 def _pushchangeset(pushop):
456 """Make the actual push of changeset bundle to remote repo"""
490 """Make the actual push of changeset bundle to remote repo"""
457 if 'changesets' in pushop.stepsdone:
491 if 'changesets' in pushop.stepsdone:
458 return
492 return
459 pushop.stepsdone.add('changesets')
493 pushop.stepsdone.add('changesets')
460 if not _pushcheckoutgoing(pushop):
494 if not _pushcheckoutgoing(pushop):
461 return
495 return
462 pushop.repo.prepushoutgoinghooks(pushop.repo,
496 pushop.repo.prepushoutgoinghooks(pushop.repo,
463 pushop.remote,
497 pushop.remote,
464 pushop.outgoing)
498 pushop.outgoing)
465 outgoing = pushop.outgoing
499 outgoing = pushop.outgoing
466 unbundle = pushop.remote.capable('unbundle')
500 unbundle = pushop.remote.capable('unbundle')
467 # TODO: get bundlecaps from remote
501 # TODO: get bundlecaps from remote
468 bundlecaps = None
502 bundlecaps = None
469 # create a changegroup from local
503 # create a changegroup from local
470 if pushop.revs is None and not (outgoing.excluded
504 if pushop.revs is None and not (outgoing.excluded
471 or pushop.repo.changelog.filteredrevs):
505 or pushop.repo.changelog.filteredrevs):
472 # push everything,
506 # push everything,
473 # use the fast path, no race possible on push
507 # use the fast path, no race possible on push
474 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
508 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
475 cg = changegroup.getsubset(pushop.repo,
509 cg = changegroup.getsubset(pushop.repo,
476 outgoing,
510 outgoing,
477 bundler,
511 bundler,
478 'push',
512 'push',
479 fastpath=True)
513 fastpath=True)
480 else:
514 else:
481 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
515 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
482 bundlecaps)
516 bundlecaps)
483
517
484 # apply changegroup to remote
518 # apply changegroup to remote
485 if unbundle:
519 if unbundle:
486 # local repo finds heads on server, finds out what
520 # local repo finds heads on server, finds out what
487 # revs it must push. once revs transferred, if server
521 # revs it must push. once revs transferred, if server
488 # finds it has different heads (someone else won
522 # finds it has different heads (someone else won
489 # commit/push race), server aborts.
523 # commit/push race), server aborts.
490 if pushop.force:
524 if pushop.force:
491 remoteheads = ['force']
525 remoteheads = ['force']
492 else:
526 else:
493 remoteheads = pushop.remoteheads
527 remoteheads = pushop.remoteheads
494 # ssh: return remote's addchangegroup()
528 # ssh: return remote's addchangegroup()
495 # http: return remote's addchangegroup() or 0 for error
529 # http: return remote's addchangegroup() or 0 for error
496 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
530 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
497 pushop.repo.url())
531 pushop.repo.url())
498 else:
532 else:
499 # we return an integer indicating remote head count
533 # we return an integer indicating remote head count
500 # change
534 # change
501 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
535 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
502
536
503 def _pushsyncphase(pushop):
537 def _pushsyncphase(pushop):
504 """synchronise phase information locally and remotely"""
538 """synchronise phase information locally and remotely"""
505 cheads = pushop.commonheads
539 cheads = pushop.commonheads
506 # even when we don't push, exchanging phase data is useful
540 # even when we don't push, exchanging phase data is useful
507 remotephases = pushop.remote.listkeys('phases')
541 remotephases = pushop.remote.listkeys('phases')
508 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
542 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
509 and remotephases # server supports phases
543 and remotephases # server supports phases
510 and pushop.ret is None # nothing was pushed
544 and pushop.ret is None # nothing was pushed
511 and remotephases.get('publishing', False)):
545 and remotephases.get('publishing', False)):
512 # When:
546 # When:
513 # - this is a subrepo push
547 # - this is a subrepo push
514 # - and remote support phase
548 # - and remote support phase
515 # - and no changeset was pushed
549 # - and no changeset was pushed
516 # - and remote is publishing
550 # - and remote is publishing
517 # We may be in issue 3871 case!
551 # We may be in issue 3871 case!
518 # We drop the possible phase synchronisation done by
552 # We drop the possible phase synchronisation done by
519 # courtesy to publish changesets possibly locally draft
553 # courtesy to publish changesets possibly locally draft
520 # on the remote.
554 # on the remote.
521 remotephases = {'publishing': 'True'}
555 remotephases = {'publishing': 'True'}
522 if not remotephases: # old server or public only reply from non-publishing
556 if not remotephases: # old server or public only reply from non-publishing
523 _localphasemove(pushop, cheads)
557 _localphasemove(pushop, cheads)
524 # don't push any phase data as there is nothing to push
558 # don't push any phase data as there is nothing to push
525 else:
559 else:
526 ana = phases.analyzeremotephases(pushop.repo, cheads,
560 ana = phases.analyzeremotephases(pushop.repo, cheads,
527 remotephases)
561 remotephases)
528 pheads, droots = ana
562 pheads, droots = ana
529 ### Apply remote phase on local
563 ### Apply remote phase on local
530 if remotephases.get('publishing', False):
564 if remotephases.get('publishing', False):
531 _localphasemove(pushop, cheads)
565 _localphasemove(pushop, cheads)
532 else: # publish = False
566 else: # publish = False
533 _localphasemove(pushop, pheads)
567 _localphasemove(pushop, pheads)
534 _localphasemove(pushop, cheads, phases.draft)
568 _localphasemove(pushop, cheads, phases.draft)
535 ### Apply local phase on remote
569 ### Apply local phase on remote
536
570
537 if pushop.ret:
571 if pushop.ret:
538 if 'phases' in pushop.stepsdone:
572 if 'phases' in pushop.stepsdone:
539 # phases already pushed though bundle2
573 # phases already pushed though bundle2
540 return
574 return
541 outdated = pushop.outdatedphases
575 outdated = pushop.outdatedphases
542 else:
576 else:
543 outdated = pushop.fallbackoutdatedphases
577 outdated = pushop.fallbackoutdatedphases
544
578
545 pushop.stepsdone.add('phases')
579 pushop.stepsdone.add('phases')
546
580
547 # filter heads already turned public by the push
581 # filter heads already turned public by the push
548 outdated = [c for c in outdated if c.node() not in pheads]
582 outdated = [c for c in outdated if c.node() not in pheads]
549 b2caps = bundle2.bundle2caps(pushop.remote)
583 b2caps = bundle2.bundle2caps(pushop.remote)
550 if 'b2x:pushkey' in b2caps:
584 if 'b2x:pushkey' in b2caps:
551 # server supports bundle2, let's do a batched push through it
585 # server supports bundle2, let's do a batched push through it
552 #
586 #
553 # This will eventually be unified with the changesets bundle2 push
587 # This will eventually be unified with the changesets bundle2 push
554 bundler = bundle2.bundle20(pushop.ui, b2caps)
588 bundler = bundle2.bundle20(pushop.ui, b2caps)
555 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
589 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
556 bundler.newpart('b2x:replycaps', data=capsblob)
590 bundler.newpart('b2x:replycaps', data=capsblob)
557 part2node = []
591 part2node = []
558 enc = pushkey.encode
592 enc = pushkey.encode
559 for newremotehead in outdated:
593 for newremotehead in outdated:
560 part = bundler.newpart('b2x:pushkey')
594 part = bundler.newpart('b2x:pushkey')
561 part.addparam('namespace', enc('phases'))
595 part.addparam('namespace', enc('phases'))
562 part.addparam('key', enc(newremotehead.hex()))
596 part.addparam('key', enc(newremotehead.hex()))
563 part.addparam('old', enc(str(phases.draft)))
597 part.addparam('old', enc(str(phases.draft)))
564 part.addparam('new', enc(str(phases.public)))
598 part.addparam('new', enc(str(phases.public)))
565 part2node.append((part.id, newremotehead))
599 part2node.append((part.id, newremotehead))
566 stream = util.chunkbuffer(bundler.getchunks())
600 stream = util.chunkbuffer(bundler.getchunks())
567 try:
601 try:
568 reply = pushop.remote.unbundle(stream, ['force'], 'push')
602 reply = pushop.remote.unbundle(stream, ['force'], 'push')
569 op = bundle2.processbundle(pushop.repo, reply)
603 op = bundle2.processbundle(pushop.repo, reply)
570 except error.BundleValueError, exc:
604 except error.BundleValueError, exc:
571 raise util.Abort('missing support for %s' % exc)
605 raise util.Abort('missing support for %s' % exc)
572 for partid, node in part2node:
606 for partid, node in part2node:
573 partrep = op.records.getreplies(partid)
607 partrep = op.records.getreplies(partid)
574 results = partrep['pushkey']
608 results = partrep['pushkey']
575 assert len(results) <= 1
609 assert len(results) <= 1
576 msg = None
610 msg = None
577 if not results:
611 if not results:
578 msg = _('server ignored update of %s to public!\n') % node
612 msg = _('server ignored update of %s to public!\n') % node
579 elif not int(results[0]['return']):
613 elif not int(results[0]['return']):
580 msg = _('updating %s to public failed!\n') % node
614 msg = _('updating %s to public failed!\n') % node
581 if msg is not None:
615 if msg is not None:
582 pushop.ui.warn(msg)
616 pushop.ui.warn(msg)
583
617
584 else:
618 else:
585 # fallback to independant pushkey command
619 # fallback to independant pushkey command
586 for newremotehead in outdated:
620 for newremotehead in outdated:
587 r = pushop.remote.pushkey('phases',
621 r = pushop.remote.pushkey('phases',
588 newremotehead.hex(),
622 newremotehead.hex(),
589 str(phases.draft),
623 str(phases.draft),
590 str(phases.public))
624 str(phases.public))
591 if not r:
625 if not r:
592 pushop.ui.warn(_('updating %s to public failed!\n')
626 pushop.ui.warn(_('updating %s to public failed!\n')
593 % newremotehead)
627 % newremotehead)
594
628
595 def _localphasemove(pushop, nodes, phase=phases.public):
629 def _localphasemove(pushop, nodes, phase=phases.public):
596 """move <nodes> to <phase> in the local source repo"""
630 """move <nodes> to <phase> in the local source repo"""
597 if pushop.locallocked:
631 if pushop.locallocked:
598 tr = pushop.repo.transaction('push-phase-sync')
632 tr = pushop.repo.transaction('push-phase-sync')
599 try:
633 try:
600 phases.advanceboundary(pushop.repo, tr, phase, nodes)
634 phases.advanceboundary(pushop.repo, tr, phase, nodes)
601 tr.close()
635 tr.close()
602 finally:
636 finally:
603 tr.release()
637 tr.release()
604 else:
638 else:
605 # repo is not locked, do not change any phases!
639 # repo is not locked, do not change any phases!
606 # Informs the user that phases should have been moved when
640 # Informs the user that phases should have been moved when
607 # applicable.
641 # applicable.
608 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
642 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
609 phasestr = phases.phasenames[phase]
643 phasestr = phases.phasenames[phase]
610 if actualmoves:
644 if actualmoves:
611 pushop.ui.status(_('cannot lock source repo, skipping '
645 pushop.ui.status(_('cannot lock source repo, skipping '
612 'local %s phase update\n') % phasestr)
646 'local %s phase update\n') % phasestr)
613
647
614 def _pushobsolete(pushop):
648 def _pushobsolete(pushop):
615 """utility function to push obsolete markers to a remote"""
649 """utility function to push obsolete markers to a remote"""
616 if 'obsmarkers' in pushop.stepsdone:
650 if 'obsmarkers' in pushop.stepsdone:
617 return
651 return
618 pushop.ui.debug('try to push obsolete markers to remote\n')
652 pushop.ui.debug('try to push obsolete markers to remote\n')
619 repo = pushop.repo
653 repo = pushop.repo
620 remote = pushop.remote
654 remote = pushop.remote
621 pushop.stepsdone.add('obsmarkers')
655 pushop.stepsdone.add('obsmarkers')
622 if (obsolete._enabled and repo.obsstore and
656 if (obsolete._enabled and repo.obsstore and
623 'obsolete' in remote.listkeys('namespaces')):
657 'obsolete' in remote.listkeys('namespaces')):
624 rslts = []
658 rslts = []
625 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
659 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
626 for key in sorted(remotedata, reverse=True):
660 for key in sorted(remotedata, reverse=True):
627 # reverse sort to ensure we end with dump0
661 # reverse sort to ensure we end with dump0
628 data = remotedata[key]
662 data = remotedata[key]
629 rslts.append(remote.pushkey('obsolete', key, '', data))
663 rslts.append(remote.pushkey('obsolete', key, '', data))
630 if [r for r in rslts if not r]:
664 if [r for r in rslts if not r]:
631 msg = _('failed to push some obsolete markers!\n')
665 msg = _('failed to push some obsolete markers!\n')
632 repo.ui.warn(msg)
666 repo.ui.warn(msg)
633
667
634 def _pushbookmark(pushop):
668 def _pushbookmark(pushop):
635 """Update bookmark position on remote"""
669 """Update bookmark position on remote"""
636 if pushop.ret == 0 or 'bookmarks' in pushop.stepsdone:
670 if pushop.ret == 0 or 'bookmarks' in pushop.stepsdone:
637 return
671 return
638 pushop.stepsdone.add('bookmarks')
672 pushop.stepsdone.add('bookmarks')
639 ui = pushop.ui
673 ui = pushop.ui
640 remote = pushop.remote
674 remote = pushop.remote
641 for b, old, new in pushop.outbookmarks:
675 for b, old, new in pushop.outbookmarks:
642 if remote.pushkey('bookmarks', b, old, new):
676 if remote.pushkey('bookmarks', b, old, new):
643 ui.status(_("updating bookmark %s\n") % b)
677 ui.status(_("updating bookmark %s\n") % b)
644 else:
678 else:
645 ui.warn(_('updating bookmark %s failed!\n') % b)
679 ui.warn(_('updating bookmark %s failed!\n') % b)
646
680
647 class pulloperation(object):
681 class pulloperation(object):
648 """A object that represent a single pull operation
682 """A object that represent a single pull operation
649
683
650 It purpose is to carry push related state and very common operation.
684 It purpose is to carry push related state and very common operation.
651
685
652 A new should be created at the beginning of each pull and discarded
686 A new should be created at the beginning of each pull and discarded
653 afterward.
687 afterward.
654 """
688 """
655
689
656 def __init__(self, repo, remote, heads=None, force=False):
690 def __init__(self, repo, remote, heads=None, force=False):
657 # repo we pull into
691 # repo we pull into
658 self.repo = repo
692 self.repo = repo
659 # repo we pull from
693 # repo we pull from
660 self.remote = remote
694 self.remote = remote
661 # revision we try to pull (None is "all")
695 # revision we try to pull (None is "all")
662 self.heads = heads
696 self.heads = heads
663 # do we force pull?
697 # do we force pull?
664 self.force = force
698 self.force = force
665 # the name the pull transaction
699 # the name the pull transaction
666 self._trname = 'pull\n' + util.hidepassword(remote.url())
700 self._trname = 'pull\n' + util.hidepassword(remote.url())
667 # hold the transaction once created
701 # hold the transaction once created
668 self._tr = None
702 self._tr = None
669 # set of common changeset between local and remote before pull
703 # set of common changeset between local and remote before pull
670 self.common = None
704 self.common = None
671 # set of pulled head
705 # set of pulled head
672 self.rheads = None
706 self.rheads = None
673 # list of missing changeset to fetch remotely
707 # list of missing changeset to fetch remotely
674 self.fetch = None
708 self.fetch = None
675 # result of changegroup pulling (used as return code by pull)
709 # result of changegroup pulling (used as return code by pull)
676 self.cgresult = None
710 self.cgresult = None
677 # list of step remaining todo (related to future bundle2 usage)
711 # list of step remaining todo (related to future bundle2 usage)
678 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
712 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
679
713
680 @util.propertycache
714 @util.propertycache
681 def pulledsubset(self):
715 def pulledsubset(self):
682 """heads of the set of changeset target by the pull"""
716 """heads of the set of changeset target by the pull"""
683 # compute target subset
717 # compute target subset
684 if self.heads is None:
718 if self.heads is None:
685 # We pulled every thing possible
719 # We pulled every thing possible
686 # sync on everything common
720 # sync on everything common
687 c = set(self.common)
721 c = set(self.common)
688 ret = list(self.common)
722 ret = list(self.common)
689 for n in self.rheads:
723 for n in self.rheads:
690 if n not in c:
724 if n not in c:
691 ret.append(n)
725 ret.append(n)
692 return ret
726 return ret
693 else:
727 else:
694 # We pulled a specific subset
728 # We pulled a specific subset
695 # sync on this subset
729 # sync on this subset
696 return self.heads
730 return self.heads
697
731
698 def gettransaction(self):
732 def gettransaction(self):
699 """get appropriate pull transaction, creating it if needed"""
733 """get appropriate pull transaction, creating it if needed"""
700 if self._tr is None:
734 if self._tr is None:
701 self._tr = self.repo.transaction(self._trname)
735 self._tr = self.repo.transaction(self._trname)
702 return self._tr
736 return self._tr
703
737
704 def closetransaction(self):
738 def closetransaction(self):
705 """close transaction if created"""
739 """close transaction if created"""
706 if self._tr is not None:
740 if self._tr is not None:
707 self._tr.close()
741 self._tr.close()
708
742
709 def releasetransaction(self):
743 def releasetransaction(self):
710 """release transaction if created"""
744 """release transaction if created"""
711 if self._tr is not None:
745 if self._tr is not None:
712 self._tr.release()
746 self._tr.release()
713
747
714 def pull(repo, remote, heads=None, force=False):
748 def pull(repo, remote, heads=None, force=False):
715 pullop = pulloperation(repo, remote, heads, force)
749 pullop = pulloperation(repo, remote, heads, force)
716 if pullop.remote.local():
750 if pullop.remote.local():
717 missing = set(pullop.remote.requirements) - pullop.repo.supported
751 missing = set(pullop.remote.requirements) - pullop.repo.supported
718 if missing:
752 if missing:
719 msg = _("required features are not"
753 msg = _("required features are not"
720 " supported in the destination:"
754 " supported in the destination:"
721 " %s") % (', '.join(sorted(missing)))
755 " %s") % (', '.join(sorted(missing)))
722 raise util.Abort(msg)
756 raise util.Abort(msg)
723
757
724 lock = pullop.repo.lock()
758 lock = pullop.repo.lock()
725 try:
759 try:
726 _pulldiscovery(pullop)
760 _pulldiscovery(pullop)
727 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
761 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
728 and pullop.remote.capable('bundle2-exp')):
762 and pullop.remote.capable('bundle2-exp')):
729 _pullbundle2(pullop)
763 _pullbundle2(pullop)
730 if 'changegroup' in pullop.todosteps:
764 if 'changegroup' in pullop.todosteps:
731 _pullchangeset(pullop)
765 _pullchangeset(pullop)
732 if 'phases' in pullop.todosteps:
766 if 'phases' in pullop.todosteps:
733 _pullphase(pullop)
767 _pullphase(pullop)
734 if 'obsmarkers' in pullop.todosteps:
768 if 'obsmarkers' in pullop.todosteps:
735 _pullobsolete(pullop)
769 _pullobsolete(pullop)
736 pullop.closetransaction()
770 pullop.closetransaction()
737 finally:
771 finally:
738 pullop.releasetransaction()
772 pullop.releasetransaction()
739 lock.release()
773 lock.release()
740
774
741 return pullop.cgresult
775 return pullop.cgresult
742
776
743 def _pulldiscovery(pullop):
777 def _pulldiscovery(pullop):
744 """discovery phase for the pull
778 """discovery phase for the pull
745
779
746 Current handle changeset discovery only, will change handle all discovery
780 Current handle changeset discovery only, will change handle all discovery
747 at some point."""
781 at some point."""
748 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
782 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
749 pullop.remote,
783 pullop.remote,
750 heads=pullop.heads,
784 heads=pullop.heads,
751 force=pullop.force)
785 force=pullop.force)
752 pullop.common, pullop.fetch, pullop.rheads = tmp
786 pullop.common, pullop.fetch, pullop.rheads = tmp
753
787
754 def _pullbundle2(pullop):
788 def _pullbundle2(pullop):
755 """pull data using bundle2
789 """pull data using bundle2
756
790
757 For now, the only supported data are changegroup."""
791 For now, the only supported data are changegroup."""
758 remotecaps = bundle2.bundle2caps(pullop.remote)
792 remotecaps = bundle2.bundle2caps(pullop.remote)
759 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
793 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
760 # pulling changegroup
794 # pulling changegroup
761 pullop.todosteps.remove('changegroup')
795 pullop.todosteps.remove('changegroup')
762
796
763 kwargs['common'] = pullop.common
797 kwargs['common'] = pullop.common
764 kwargs['heads'] = pullop.heads or pullop.rheads
798 kwargs['heads'] = pullop.heads or pullop.rheads
765 if 'b2x:listkeys' in remotecaps:
799 if 'b2x:listkeys' in remotecaps:
766 kwargs['listkeys'] = ['phase']
800 kwargs['listkeys'] = ['phase']
767 if not pullop.fetch:
801 if not pullop.fetch:
768 pullop.repo.ui.status(_("no changes found\n"))
802 pullop.repo.ui.status(_("no changes found\n"))
769 pullop.cgresult = 0
803 pullop.cgresult = 0
770 else:
804 else:
771 if pullop.heads is None and list(pullop.common) == [nullid]:
805 if pullop.heads is None and list(pullop.common) == [nullid]:
772 pullop.repo.ui.status(_("requesting all changes\n"))
806 pullop.repo.ui.status(_("requesting all changes\n"))
773 _pullbundle2extraprepare(pullop, kwargs)
807 _pullbundle2extraprepare(pullop, kwargs)
774 if kwargs.keys() == ['format']:
808 if kwargs.keys() == ['format']:
775 return # nothing to pull
809 return # nothing to pull
776 bundle = pullop.remote.getbundle('pull', **kwargs)
810 bundle = pullop.remote.getbundle('pull', **kwargs)
777 try:
811 try:
778 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
812 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
779 except error.BundleValueError, exc:
813 except error.BundleValueError, exc:
780 raise util.Abort('missing support for %s' % exc)
814 raise util.Abort('missing support for %s' % exc)
781
815
782 if pullop.fetch:
816 if pullop.fetch:
783 assert len(op.records['changegroup']) == 1
817 assert len(op.records['changegroup']) == 1
784 pullop.cgresult = op.records['changegroup'][0]['return']
818 pullop.cgresult = op.records['changegroup'][0]['return']
785
819
786 # processing phases change
820 # processing phases change
787 for namespace, value in op.records['listkeys']:
821 for namespace, value in op.records['listkeys']:
788 if namespace == 'phases':
822 if namespace == 'phases':
789 _pullapplyphases(pullop, value)
823 _pullapplyphases(pullop, value)
790
824
791 def _pullbundle2extraprepare(pullop, kwargs):
825 def _pullbundle2extraprepare(pullop, kwargs):
792 """hook function so that extensions can extend the getbundle call"""
826 """hook function so that extensions can extend the getbundle call"""
793 pass
827 pass
794
828
795 def _pullchangeset(pullop):
829 def _pullchangeset(pullop):
796 """pull changeset from unbundle into the local repo"""
830 """pull changeset from unbundle into the local repo"""
797 # We delay the open of the transaction as late as possible so we
831 # We delay the open of the transaction as late as possible so we
798 # don't open transaction for nothing or you break future useful
832 # don't open transaction for nothing or you break future useful
799 # rollback call
833 # rollback call
800 pullop.todosteps.remove('changegroup')
834 pullop.todosteps.remove('changegroup')
801 if not pullop.fetch:
835 if not pullop.fetch:
802 pullop.repo.ui.status(_("no changes found\n"))
836 pullop.repo.ui.status(_("no changes found\n"))
803 pullop.cgresult = 0
837 pullop.cgresult = 0
804 return
838 return
805 pullop.gettransaction()
839 pullop.gettransaction()
806 if pullop.heads is None and list(pullop.common) == [nullid]:
840 if pullop.heads is None and list(pullop.common) == [nullid]:
807 pullop.repo.ui.status(_("requesting all changes\n"))
841 pullop.repo.ui.status(_("requesting all changes\n"))
808 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
842 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
809 # issue1320, avoid a race if remote changed after discovery
843 # issue1320, avoid a race if remote changed after discovery
810 pullop.heads = pullop.rheads
844 pullop.heads = pullop.rheads
811
845
812 if pullop.remote.capable('getbundle'):
846 if pullop.remote.capable('getbundle'):
813 # TODO: get bundlecaps from remote
847 # TODO: get bundlecaps from remote
814 cg = pullop.remote.getbundle('pull', common=pullop.common,
848 cg = pullop.remote.getbundle('pull', common=pullop.common,
815 heads=pullop.heads or pullop.rheads)
849 heads=pullop.heads or pullop.rheads)
816 elif pullop.heads is None:
850 elif pullop.heads is None:
817 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
851 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
818 elif not pullop.remote.capable('changegroupsubset'):
852 elif not pullop.remote.capable('changegroupsubset'):
819 raise util.Abort(_("partial pull cannot be done because "
853 raise util.Abort(_("partial pull cannot be done because "
820 "other repository doesn't support "
854 "other repository doesn't support "
821 "changegroupsubset."))
855 "changegroupsubset."))
822 else:
856 else:
823 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
857 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
824 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
858 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
825 pullop.remote.url())
859 pullop.remote.url())
826
860
827 def _pullphase(pullop):
861 def _pullphase(pullop):
828 # Get remote phases data from remote
862 # Get remote phases data from remote
829 remotephases = pullop.remote.listkeys('phases')
863 remotephases = pullop.remote.listkeys('phases')
830 _pullapplyphases(pullop, remotephases)
864 _pullapplyphases(pullop, remotephases)
831
865
832 def _pullapplyphases(pullop, remotephases):
866 def _pullapplyphases(pullop, remotephases):
833 """apply phase movement from observed remote state"""
867 """apply phase movement from observed remote state"""
834 pullop.todosteps.remove('phases')
868 pullop.todosteps.remove('phases')
835 publishing = bool(remotephases.get('publishing', False))
869 publishing = bool(remotephases.get('publishing', False))
836 if remotephases and not publishing:
870 if remotephases and not publishing:
837 # remote is new and unpublishing
871 # remote is new and unpublishing
838 pheads, _dr = phases.analyzeremotephases(pullop.repo,
872 pheads, _dr = phases.analyzeremotephases(pullop.repo,
839 pullop.pulledsubset,
873 pullop.pulledsubset,
840 remotephases)
874 remotephases)
841 dheads = pullop.pulledsubset
875 dheads = pullop.pulledsubset
842 else:
876 else:
843 # Remote is old or publishing all common changesets
877 # Remote is old or publishing all common changesets
844 # should be seen as public
878 # should be seen as public
845 pheads = pullop.pulledsubset
879 pheads = pullop.pulledsubset
846 dheads = []
880 dheads = []
847 unfi = pullop.repo.unfiltered()
881 unfi = pullop.repo.unfiltered()
848 phase = unfi._phasecache.phase
882 phase = unfi._phasecache.phase
849 rev = unfi.changelog.nodemap.get
883 rev = unfi.changelog.nodemap.get
850 public = phases.public
884 public = phases.public
851 draft = phases.draft
885 draft = phases.draft
852
886
853 # exclude changesets already public locally and update the others
887 # exclude changesets already public locally and update the others
854 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
888 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
855 if pheads:
889 if pheads:
856 tr = pullop.gettransaction()
890 tr = pullop.gettransaction()
857 phases.advanceboundary(pullop.repo, tr, public, pheads)
891 phases.advanceboundary(pullop.repo, tr, public, pheads)
858
892
859 # exclude changesets already draft locally and update the others
893 # exclude changesets already draft locally and update the others
860 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
894 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
861 if dheads:
895 if dheads:
862 tr = pullop.gettransaction()
896 tr = pullop.gettransaction()
863 phases.advanceboundary(pullop.repo, tr, draft, dheads)
897 phases.advanceboundary(pullop.repo, tr, draft, dheads)
864
898
865 def _pullobsolete(pullop):
899 def _pullobsolete(pullop):
866 """utility function to pull obsolete markers from a remote
900 """utility function to pull obsolete markers from a remote
867
901
868 The `gettransaction` is function that return the pull transaction, creating
902 The `gettransaction` is function that return the pull transaction, creating
869 one if necessary. We return the transaction to inform the calling code that
903 one if necessary. We return the transaction to inform the calling code that
870 a new transaction have been created (when applicable).
904 a new transaction have been created (when applicable).
871
905
872 Exists mostly to allow overriding for experimentation purpose"""
906 Exists mostly to allow overriding for experimentation purpose"""
873 pullop.todosteps.remove('obsmarkers')
907 pullop.todosteps.remove('obsmarkers')
874 tr = None
908 tr = None
875 if obsolete._enabled:
909 if obsolete._enabled:
876 pullop.repo.ui.debug('fetching remote obsolete markers\n')
910 pullop.repo.ui.debug('fetching remote obsolete markers\n')
877 remoteobs = pullop.remote.listkeys('obsolete')
911 remoteobs = pullop.remote.listkeys('obsolete')
878 if 'dump0' in remoteobs:
912 if 'dump0' in remoteobs:
879 tr = pullop.gettransaction()
913 tr = pullop.gettransaction()
880 for key in sorted(remoteobs, reverse=True):
914 for key in sorted(remoteobs, reverse=True):
881 if key.startswith('dump'):
915 if key.startswith('dump'):
882 data = base85.b85decode(remoteobs[key])
916 data = base85.b85decode(remoteobs[key])
883 pullop.repo.obsstore.mergemarkers(tr, data)
917 pullop.repo.obsstore.mergemarkers(tr, data)
884 pullop.repo.invalidatevolatilesets()
918 pullop.repo.invalidatevolatilesets()
885 return tr
919 return tr
886
920
887 def caps20to10(repo):
921 def caps20to10(repo):
888 """return a set with appropriate options to use bundle20 during getbundle"""
922 """return a set with appropriate options to use bundle20 during getbundle"""
889 caps = set(['HG2X'])
923 caps = set(['HG2X'])
890 capsblob = bundle2.encodecaps(repo.bundle2caps)
924 capsblob = bundle2.encodecaps(repo.bundle2caps)
891 caps.add('bundle2=' + urllib.quote(capsblob))
925 caps.add('bundle2=' + urllib.quote(capsblob))
892 return caps
926 return caps
893
927
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.

    :repo: local repository the bundle is generated from
    :source: operation source passed down to the changegroup code
    :heads:/:common: discovery data bounding the changegroup
    :bundlecaps: capability strings sent by the client, or None
    :kwargs: extra bundle2 arguments ('cg', 'listkeys', ...)

    Raises ValueError when bundle2-only arguments are requested from a
    client that did not advertise 'HG2X'.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif bundlecaps is None or 'HG2X' not in bundlecaps:
        # guard 'bundlecaps is None' explicitly: without it a client that
        # disabled 'cg' while sending no caps would hit a TypeError from
        # the membership test instead of this intended ValueError (the
        # bundle10 check below already uses the same guarded form).
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # bundle10 clients only understand a bare changegroup; any extra
        # bundle2 argument cannot be honored and must be rejected loudly.
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        # one listkeys part per requested pushkey namespace
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    # let extensions append their own parts before the stream is emitted
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
940
974
941 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
975 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
942 bundlecaps=None, **kwargs):
976 bundlecaps=None, **kwargs):
943 """hook function to let extensions add parts to the requested bundle"""
977 """hook function to let extensions add parts to the requested bundle"""
944 pass
978 pass
945
979
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    current_hash = util.sha1(''.join(sorted(current))).digest()
    unchanged = (their_heads == ['force']
                 or their_heads == current
                 or their_heads == ['hashed', current_hash])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
959
993
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    # return value forwarded from the changegroup/bundle2 application
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        # abort with PushRaced if the repo heads changed since the client
        # computed the bundle (unless it forced or sent a matching hash)
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # 'params' marks a bundle2 stream (plain changegroups lack it)
            try:
                tr = repo.transaction('unbundle')
                # tag the transaction so hooks can tell bundle2 pushes apart
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                # flush pending changelog data so the pre-close hook can see
                # the incoming changesets before the transaction commits
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                # pre-close hook may raise to veto the whole transaction
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # flag the failure so callers report bundle2-specific errors
                exc.duringunbundle2 = True
                raise
        else:
            # legacy bundle10 path: apply the bare changegroup directly
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            # release() rolls back unless tr.close() already ran
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now