##// END OF EJS Templates
exchange: add a `buildobsmarkerpart` function...
Pierre-Yves David -
r22346:a76660f8 default
parent child Browse files
Show More
@@ -1,1031 +1,1045 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
14 def readbundle(ui, fh, fname, vfs=None):
14 def readbundle(ui, fh, fname, vfs=None):
15 header = changegroup.readexactly(fh, 4)
15 header = changegroup.readexactly(fh, 4)
16
16
17 alg = None
17 alg = None
18 if not fname:
18 if not fname:
19 fname = "stream"
19 fname = "stream"
20 if not header.startswith('HG') and header.startswith('\0'):
20 if not header.startswith('HG') and header.startswith('\0'):
21 fh = changegroup.headerlessfixup(fh, header)
21 fh = changegroup.headerlessfixup(fh, header)
22 header = "HG10"
22 header = "HG10"
23 alg = 'UN'
23 alg = 'UN'
24 elif vfs:
24 elif vfs:
25 fname = vfs.join(fname)
25 fname = vfs.join(fname)
26
26
27 magic, version = header[0:2], header[2:4]
27 magic, version = header[0:2], header[2:4]
28
28
29 if magic != 'HG':
29 if magic != 'HG':
30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
31 if version == '10':
31 if version == '10':
32 if alg is None:
32 if alg is None:
33 alg = changegroup.readexactly(fh, 2)
33 alg = changegroup.readexactly(fh, 2)
34 return changegroup.unbundle10(fh, alg)
34 return changegroup.unbundle10(fh, alg)
35 elif version == '2X':
35 elif version == '2X':
36 return bundle2.unbundle20(ui, fh, header=magic + version)
36 return bundle2.unbundle20(ui, fh, header=magic + version)
37 else:
37 else:
38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40 def buildobsmarkerspart(bundler, markers):
41 """add an obsmarker part to the bundler with <markers>
42
43 No part is created if markers is empty.
44 Raises ValueError if the bundler doesn't support any known obsmarker format.
45 """
46 if markers:
47 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
48 version = obsolete.commonversion(remoteversions)
49 if version is None:
50 raise ValueError('bundler do not support common obsmarker format')
51 stream = obsolete.encodemarkers(markers, True, version=version)
52 return bundler.newpart('B2X:OBSMARKERS', data=stream)
53 return None
40
54
41 class pushoperation(object):
55 class pushoperation(object):
42 """A object that represent a single push operation
56 """A object that represent a single push operation
43
57
44 It purpose is to carry push related state and very common operation.
58 It purpose is to carry push related state and very common operation.
45
59
46 A new should be created at the beginning of each push and discarded
60 A new should be created at the beginning of each push and discarded
47 afterward.
61 afterward.
48 """
62 """
49
63
50 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
64 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
51 # repo we push from
65 # repo we push from
52 self.repo = repo
66 self.repo = repo
53 self.ui = repo.ui
67 self.ui = repo.ui
54 # repo we push to
68 # repo we push to
55 self.remote = remote
69 self.remote = remote
56 # force option provided
70 # force option provided
57 self.force = force
71 self.force = force
58 # revs to be pushed (None is "all")
72 # revs to be pushed (None is "all")
59 self.revs = revs
73 self.revs = revs
60 # allow push of new branch
74 # allow push of new branch
61 self.newbranch = newbranch
75 self.newbranch = newbranch
62 # did a local lock get acquired?
76 # did a local lock get acquired?
63 self.locallocked = None
77 self.locallocked = None
64 # step already performed
78 # step already performed
65 # (used to check what steps have been already performed through bundle2)
79 # (used to check what steps have been already performed through bundle2)
66 self.stepsdone = set()
80 self.stepsdone = set()
67 # Integer version of the push result
81 # Integer version of the push result
68 # - None means nothing to push
82 # - None means nothing to push
69 # - 0 means HTTP error
83 # - 0 means HTTP error
70 # - 1 means we pushed and remote head count is unchanged *or*
84 # - 1 means we pushed and remote head count is unchanged *or*
71 # we have outgoing changesets but refused to push
85 # we have outgoing changesets but refused to push
72 # - other values as described by addchangegroup()
86 # - other values as described by addchangegroup()
73 self.ret = None
87 self.ret = None
74 # discover.outgoing object (contains common and outgoing data)
88 # discover.outgoing object (contains common and outgoing data)
75 self.outgoing = None
89 self.outgoing = None
76 # all remote heads before the push
90 # all remote heads before the push
77 self.remoteheads = None
91 self.remoteheads = None
78 # testable as a boolean indicating if any nodes are missing locally.
92 # testable as a boolean indicating if any nodes are missing locally.
79 self.incoming = None
93 self.incoming = None
80 # phases changes that must be pushed along side the changesets
94 # phases changes that must be pushed along side the changesets
81 self.outdatedphases = None
95 self.outdatedphases = None
82 # phases changes that must be pushed if changeset push fails
96 # phases changes that must be pushed if changeset push fails
83 self.fallbackoutdatedphases = None
97 self.fallbackoutdatedphases = None
84 # outgoing obsmarkers
98 # outgoing obsmarkers
85 self.outobsmarkers = set()
99 self.outobsmarkers = set()
86 # outgoing bookmarks
100 # outgoing bookmarks
87 self.outbookmarks = []
101 self.outbookmarks = []
88
102
89 @util.propertycache
103 @util.propertycache
90 def futureheads(self):
104 def futureheads(self):
91 """future remote heads if the changeset push succeeds"""
105 """future remote heads if the changeset push succeeds"""
92 return self.outgoing.missingheads
106 return self.outgoing.missingheads
93
107
94 @util.propertycache
108 @util.propertycache
95 def fallbackheads(self):
109 def fallbackheads(self):
96 """future remote heads if the changeset push fails"""
110 """future remote heads if the changeset push fails"""
97 if self.revs is None:
111 if self.revs is None:
98 # not target to push, all common are relevant
112 # not target to push, all common are relevant
99 return self.outgoing.commonheads
113 return self.outgoing.commonheads
100 unfi = self.repo.unfiltered()
114 unfi = self.repo.unfiltered()
101 # I want cheads = heads(::missingheads and ::commonheads)
115 # I want cheads = heads(::missingheads and ::commonheads)
102 # (missingheads is revs with secret changeset filtered out)
116 # (missingheads is revs with secret changeset filtered out)
103 #
117 #
104 # This can be expressed as:
118 # This can be expressed as:
105 # cheads = ( (missingheads and ::commonheads)
119 # cheads = ( (missingheads and ::commonheads)
106 # + (commonheads and ::missingheads))"
120 # + (commonheads and ::missingheads))"
107 # )
121 # )
108 #
122 #
109 # while trying to push we already computed the following:
123 # while trying to push we already computed the following:
110 # common = (::commonheads)
124 # common = (::commonheads)
111 # missing = ((commonheads::missingheads) - commonheads)
125 # missing = ((commonheads::missingheads) - commonheads)
112 #
126 #
113 # We can pick:
127 # We can pick:
114 # * missingheads part of common (::commonheads)
128 # * missingheads part of common (::commonheads)
115 common = set(self.outgoing.common)
129 common = set(self.outgoing.common)
116 nm = self.repo.changelog.nodemap
130 nm = self.repo.changelog.nodemap
117 cheads = [node for node in self.revs if nm[node] in common]
131 cheads = [node for node in self.revs if nm[node] in common]
118 # and
132 # and
119 # * commonheads parents on missing
133 # * commonheads parents on missing
120 revset = unfi.set('%ln and parents(roots(%ln))',
134 revset = unfi.set('%ln and parents(roots(%ln))',
121 self.outgoing.commonheads,
135 self.outgoing.commonheads,
122 self.outgoing.missing)
136 self.outgoing.missing)
123 cheads.extend(c.node() for c in revset)
137 cheads.extend(c.node() for c in revset)
124 return cheads
138 return cheads
125
139
126 @property
140 @property
127 def commonheads(self):
141 def commonheads(self):
128 """set of all common heads after changeset bundle push"""
142 """set of all common heads after changeset bundle push"""
129 if self.ret:
143 if self.ret:
130 return self.futureheads
144 return self.futureheads
131 else:
145 else:
132 return self.fallbackheads
146 return self.fallbackheads
133
147
134 def push(repo, remote, force=False, revs=None, newbranch=False):
148 def push(repo, remote, force=False, revs=None, newbranch=False):
135 '''Push outgoing changesets (limited by revs) from a local
149 '''Push outgoing changesets (limited by revs) from a local
136 repository to remote. Return an integer:
150 repository to remote. Return an integer:
137 - None means nothing to push
151 - None means nothing to push
138 - 0 means HTTP error
152 - 0 means HTTP error
139 - 1 means we pushed and remote head count is unchanged *or*
153 - 1 means we pushed and remote head count is unchanged *or*
140 we have outgoing changesets but refused to push
154 we have outgoing changesets but refused to push
141 - other values as described by addchangegroup()
155 - other values as described by addchangegroup()
142 '''
156 '''
143 pushop = pushoperation(repo, remote, force, revs, newbranch)
157 pushop = pushoperation(repo, remote, force, revs, newbranch)
144 if pushop.remote.local():
158 if pushop.remote.local():
145 missing = (set(pushop.repo.requirements)
159 missing = (set(pushop.repo.requirements)
146 - pushop.remote.local().supported)
160 - pushop.remote.local().supported)
147 if missing:
161 if missing:
148 msg = _("required features are not"
162 msg = _("required features are not"
149 " supported in the destination:"
163 " supported in the destination:"
150 " %s") % (', '.join(sorted(missing)))
164 " %s") % (', '.join(sorted(missing)))
151 raise util.Abort(msg)
165 raise util.Abort(msg)
152
166
153 # there are two ways to push to remote repo:
167 # there are two ways to push to remote repo:
154 #
168 #
155 # addchangegroup assumes local user can lock remote
169 # addchangegroup assumes local user can lock remote
156 # repo (local filesystem, old ssh servers).
170 # repo (local filesystem, old ssh servers).
157 #
171 #
158 # unbundle assumes local user cannot lock remote repo (new ssh
172 # unbundle assumes local user cannot lock remote repo (new ssh
159 # servers, http servers).
173 # servers, http servers).
160
174
161 if not pushop.remote.canpush():
175 if not pushop.remote.canpush():
162 raise util.Abort(_("destination does not support push"))
176 raise util.Abort(_("destination does not support push"))
163 # get local lock as we might write phase data
177 # get local lock as we might write phase data
164 locallock = None
178 locallock = None
165 try:
179 try:
166 locallock = pushop.repo.lock()
180 locallock = pushop.repo.lock()
167 pushop.locallocked = True
181 pushop.locallocked = True
168 except IOError, err:
182 except IOError, err:
169 pushop.locallocked = False
183 pushop.locallocked = False
170 if err.errno != errno.EACCES:
184 if err.errno != errno.EACCES:
171 raise
185 raise
172 # source repo cannot be locked.
186 # source repo cannot be locked.
173 # We do not abort the push, but just disable the local phase
187 # We do not abort the push, but just disable the local phase
174 # synchronisation.
188 # synchronisation.
175 msg = 'cannot lock source repository: %s\n' % err
189 msg = 'cannot lock source repository: %s\n' % err
176 pushop.ui.debug(msg)
190 pushop.ui.debug(msg)
177 try:
191 try:
178 pushop.repo.checkpush(pushop)
192 pushop.repo.checkpush(pushop)
179 lock = None
193 lock = None
180 unbundle = pushop.remote.capable('unbundle')
194 unbundle = pushop.remote.capable('unbundle')
181 if not unbundle:
195 if not unbundle:
182 lock = pushop.remote.lock()
196 lock = pushop.remote.lock()
183 try:
197 try:
184 _pushdiscovery(pushop)
198 _pushdiscovery(pushop)
185 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
199 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
186 False)
200 False)
187 and pushop.remote.capable('bundle2-exp')):
201 and pushop.remote.capable('bundle2-exp')):
188 _pushbundle2(pushop)
202 _pushbundle2(pushop)
189 _pushchangeset(pushop)
203 _pushchangeset(pushop)
190 _pushsyncphase(pushop)
204 _pushsyncphase(pushop)
191 _pushobsolete(pushop)
205 _pushobsolete(pushop)
192 _pushbookmark(pushop)
206 _pushbookmark(pushop)
193 finally:
207 finally:
194 if lock is not None:
208 if lock is not None:
195 lock.release()
209 lock.release()
196 finally:
210 finally:
197 if locallock is not None:
211 if locallock is not None:
198 locallock.release()
212 locallock.release()
199
213
200 return pushop.ret
214 return pushop.ret
201
215
202 # list of steps to perform discovery before push
216 # list of steps to perform discovery before push
203 pushdiscoveryorder = []
217 pushdiscoveryorder = []
204
218
205 # Mapping between step name and function
219 # Mapping between step name and function
206 #
220 #
207 # This exists to help extensions wrap steps if necessary
221 # This exists to help extensions wrap steps if necessary
208 pushdiscoverymapping = {}
222 pushdiscoverymapping = {}
209
223
210 def pushdiscovery(stepname):
224 def pushdiscovery(stepname):
211 """decorator for function performing discovery before push
225 """decorator for function performing discovery before push
212
226
213 The function is added to the step -> function mapping and appended to the
227 The function is added to the step -> function mapping and appended to the
214 list of steps. Beware that decorated function will be added in order (this
228 list of steps. Beware that decorated function will be added in order (this
215 may matter).
229 may matter).
216
230
217 You can only use this decorator for a new step, if you want to wrap a step
231 You can only use this decorator for a new step, if you want to wrap a step
218 from an extension, change the pushdiscovery dictionary directly."""
232 from an extension, change the pushdiscovery dictionary directly."""
219 def dec(func):
233 def dec(func):
220 assert stepname not in pushdiscoverymapping
234 assert stepname not in pushdiscoverymapping
221 pushdiscoverymapping[stepname] = func
235 pushdiscoverymapping[stepname] = func
222 pushdiscoveryorder.append(stepname)
236 pushdiscoveryorder.append(stepname)
223 return func
237 return func
224 return dec
238 return dec
225
239
226 def _pushdiscovery(pushop):
240 def _pushdiscovery(pushop):
227 """Run all discovery steps"""
241 """Run all discovery steps"""
228 for stepname in pushdiscoveryorder:
242 for stepname in pushdiscoveryorder:
229 step = pushdiscoverymapping[stepname]
243 step = pushdiscoverymapping[stepname]
230 step(pushop)
244 step(pushop)
231
245
232 @pushdiscovery('changeset')
246 @pushdiscovery('changeset')
233 def _pushdiscoverychangeset(pushop):
247 def _pushdiscoverychangeset(pushop):
234 """discover the changeset that need to be pushed"""
248 """discover the changeset that need to be pushed"""
235 unfi = pushop.repo.unfiltered()
249 unfi = pushop.repo.unfiltered()
236 fci = discovery.findcommonincoming
250 fci = discovery.findcommonincoming
237 commoninc = fci(unfi, pushop.remote, force=pushop.force)
251 commoninc = fci(unfi, pushop.remote, force=pushop.force)
238 common, inc, remoteheads = commoninc
252 common, inc, remoteheads = commoninc
239 fco = discovery.findcommonoutgoing
253 fco = discovery.findcommonoutgoing
240 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
254 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
241 commoninc=commoninc, force=pushop.force)
255 commoninc=commoninc, force=pushop.force)
242 pushop.outgoing = outgoing
256 pushop.outgoing = outgoing
243 pushop.remoteheads = remoteheads
257 pushop.remoteheads = remoteheads
244 pushop.incoming = inc
258 pushop.incoming = inc
245
259
246 @pushdiscovery('phase')
260 @pushdiscovery('phase')
247 def _pushdiscoveryphase(pushop):
261 def _pushdiscoveryphase(pushop):
248 """discover the phase that needs to be pushed
262 """discover the phase that needs to be pushed
249
263
250 (computed for both success and failure case for changesets push)"""
264 (computed for both success and failure case for changesets push)"""
251 outgoing = pushop.outgoing
265 outgoing = pushop.outgoing
252 unfi = pushop.repo.unfiltered()
266 unfi = pushop.repo.unfiltered()
253 remotephases = pushop.remote.listkeys('phases')
267 remotephases = pushop.remote.listkeys('phases')
254 publishing = remotephases.get('publishing', False)
268 publishing = remotephases.get('publishing', False)
255 ana = phases.analyzeremotephases(pushop.repo,
269 ana = phases.analyzeremotephases(pushop.repo,
256 pushop.fallbackheads,
270 pushop.fallbackheads,
257 remotephases)
271 remotephases)
258 pheads, droots = ana
272 pheads, droots = ana
259 extracond = ''
273 extracond = ''
260 if not publishing:
274 if not publishing:
261 extracond = ' and public()'
275 extracond = ' and public()'
262 revset = 'heads((%%ln::%%ln) %s)' % extracond
276 revset = 'heads((%%ln::%%ln) %s)' % extracond
263 # Get the list of all revs draft on remote by public here.
277 # Get the list of all revs draft on remote by public here.
264 # XXX Beware that revset break if droots is not strictly
278 # XXX Beware that revset break if droots is not strictly
265 # XXX root we may want to ensure it is but it is costly
279 # XXX root we may want to ensure it is but it is costly
266 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
280 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
267 if not outgoing.missing:
281 if not outgoing.missing:
268 future = fallback
282 future = fallback
269 else:
283 else:
270 # adds changeset we are going to push as draft
284 # adds changeset we are going to push as draft
271 #
285 #
272 # should not be necessary for pushblishing server, but because of an
286 # should not be necessary for pushblishing server, but because of an
273 # issue fixed in xxxxx we have to do it anyway.
287 # issue fixed in xxxxx we have to do it anyway.
274 fdroots = list(unfi.set('roots(%ln + %ln::)',
288 fdroots = list(unfi.set('roots(%ln + %ln::)',
275 outgoing.missing, droots))
289 outgoing.missing, droots))
276 fdroots = [f.node() for f in fdroots]
290 fdroots = [f.node() for f in fdroots]
277 future = list(unfi.set(revset, fdroots, pushop.futureheads))
291 future = list(unfi.set(revset, fdroots, pushop.futureheads))
278 pushop.outdatedphases = future
292 pushop.outdatedphases = future
279 pushop.fallbackoutdatedphases = fallback
293 pushop.fallbackoutdatedphases = fallback
280
294
281 @pushdiscovery('obsmarker')
295 @pushdiscovery('obsmarker')
282 def _pushdiscoveryobsmarkers(pushop):
296 def _pushdiscoveryobsmarkers(pushop):
283 if (obsolete._enabled
297 if (obsolete._enabled
284 and pushop.repo.obsstore
298 and pushop.repo.obsstore
285 and 'obsolete' in pushop.remote.listkeys('namespaces')):
299 and 'obsolete' in pushop.remote.listkeys('namespaces')):
286 pushop.outobsmarkers = pushop.repo.obsstore
300 pushop.outobsmarkers = pushop.repo.obsstore
287
301
288 @pushdiscovery('bookmarks')
302 @pushdiscovery('bookmarks')
289 def _pushdiscoverybookmarks(pushop):
303 def _pushdiscoverybookmarks(pushop):
290 ui = pushop.ui
304 ui = pushop.ui
291 repo = pushop.repo.unfiltered()
305 repo = pushop.repo.unfiltered()
292 remote = pushop.remote
306 remote = pushop.remote
293 ui.debug("checking for updated bookmarks\n")
307 ui.debug("checking for updated bookmarks\n")
294 ancestors = ()
308 ancestors = ()
295 if pushop.revs:
309 if pushop.revs:
296 revnums = map(repo.changelog.rev, pushop.revs)
310 revnums = map(repo.changelog.rev, pushop.revs)
297 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
311 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
298 remotebookmark = remote.listkeys('bookmarks')
312 remotebookmark = remote.listkeys('bookmarks')
299
313
300 comp = bookmarks.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
314 comp = bookmarks.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
301 addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
315 addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
302 for b, scid, dcid in advsrc:
316 for b, scid, dcid in advsrc:
303 if not ancestors or repo[scid].rev() in ancestors:
317 if not ancestors or repo[scid].rev() in ancestors:
304 pushop.outbookmarks.append((b, dcid, scid))
318 pushop.outbookmarks.append((b, dcid, scid))
305
319
306 def _pushcheckoutgoing(pushop):
320 def _pushcheckoutgoing(pushop):
307 outgoing = pushop.outgoing
321 outgoing = pushop.outgoing
308 unfi = pushop.repo.unfiltered()
322 unfi = pushop.repo.unfiltered()
309 if not outgoing.missing:
323 if not outgoing.missing:
310 # nothing to push
324 # nothing to push
311 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
325 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
312 return False
326 return False
313 # something to push
327 # something to push
314 if not pushop.force:
328 if not pushop.force:
315 # if repo.obsstore == False --> no obsolete
329 # if repo.obsstore == False --> no obsolete
316 # then, save the iteration
330 # then, save the iteration
317 if unfi.obsstore:
331 if unfi.obsstore:
318 # this message are here for 80 char limit reason
332 # this message are here for 80 char limit reason
319 mso = _("push includes obsolete changeset: %s!")
333 mso = _("push includes obsolete changeset: %s!")
320 mst = "push includes %s changeset: %s!"
334 mst = "push includes %s changeset: %s!"
321 # plain versions for i18n tool to detect them
335 # plain versions for i18n tool to detect them
322 _("push includes unstable changeset: %s!")
336 _("push includes unstable changeset: %s!")
323 _("push includes bumped changeset: %s!")
337 _("push includes bumped changeset: %s!")
324 _("push includes divergent changeset: %s!")
338 _("push includes divergent changeset: %s!")
325 # If we are to push if there is at least one
339 # If we are to push if there is at least one
326 # obsolete or unstable changeset in missing, at
340 # obsolete or unstable changeset in missing, at
327 # least one of the missinghead will be obsolete or
341 # least one of the missinghead will be obsolete or
328 # unstable. So checking heads only is ok
342 # unstable. So checking heads only is ok
329 for node in outgoing.missingheads:
343 for node in outgoing.missingheads:
330 ctx = unfi[node]
344 ctx = unfi[node]
331 if ctx.obsolete():
345 if ctx.obsolete():
332 raise util.Abort(mso % ctx)
346 raise util.Abort(mso % ctx)
333 elif ctx.troubled():
347 elif ctx.troubled():
334 raise util.Abort(_(mst)
348 raise util.Abort(_(mst)
335 % (ctx.troubles()[0],
349 % (ctx.troubles()[0],
336 ctx))
350 ctx))
337 newbm = pushop.ui.configlist('bookmarks', 'pushing')
351 newbm = pushop.ui.configlist('bookmarks', 'pushing')
338 discovery.checkheads(unfi, pushop.remote, outgoing,
352 discovery.checkheads(unfi, pushop.remote, outgoing,
339 pushop.remoteheads,
353 pushop.remoteheads,
340 pushop.newbranch,
354 pushop.newbranch,
341 bool(pushop.incoming),
355 bool(pushop.incoming),
342 newbm)
356 newbm)
343 return True
357 return True
344
358
345 # List of names of steps to perform for an outgoing bundle2, order matters.
359 # List of names of steps to perform for an outgoing bundle2, order matters.
346 b2partsgenorder = []
360 b2partsgenorder = []
347
361
348 # Mapping between step name and function
362 # Mapping between step name and function
349 #
363 #
350 # This exists to help extensions wrap steps if necessary
364 # This exists to help extensions wrap steps if necessary
351 b2partsgenmapping = {}
365 b2partsgenmapping = {}
352
366
353 def b2partsgenerator(stepname):
367 def b2partsgenerator(stepname):
354 """decorator for function generating bundle2 part
368 """decorator for function generating bundle2 part
355
369
356 The function is added to the step -> function mapping and appended to the
370 The function is added to the step -> function mapping and appended to the
357 list of steps. Beware that decorated functions will be added in order
371 list of steps. Beware that decorated functions will be added in order
358 (this may matter).
372 (this may matter).
359
373
360 You can only use this decorator for new steps, if you want to wrap a step
374 You can only use this decorator for new steps, if you want to wrap a step
361 from an extension, attack the b2partsgenmapping dictionary directly."""
375 from an extension, attack the b2partsgenmapping dictionary directly."""
362 def dec(func):
376 def dec(func):
363 assert stepname not in b2partsgenmapping
377 assert stepname not in b2partsgenmapping
364 b2partsgenmapping[stepname] = func
378 b2partsgenmapping[stepname] = func
365 b2partsgenorder.append(stepname)
379 b2partsgenorder.append(stepname)
366 return func
380 return func
367 return dec
381 return dec
368
382
369 @b2partsgenerator('changeset')
383 @b2partsgenerator('changeset')
370 def _pushb2ctx(pushop, bundler):
384 def _pushb2ctx(pushop, bundler):
371 """handle changegroup push through bundle2
385 """handle changegroup push through bundle2
372
386
373 addchangegroup result is stored in the ``pushop.ret`` attribute.
387 addchangegroup result is stored in the ``pushop.ret`` attribute.
374 """
388 """
375 if 'changesets' in pushop.stepsdone:
389 if 'changesets' in pushop.stepsdone:
376 return
390 return
377 pushop.stepsdone.add('changesets')
391 pushop.stepsdone.add('changesets')
378 # Send known heads to the server for race detection.
392 # Send known heads to the server for race detection.
379 if not _pushcheckoutgoing(pushop):
393 if not _pushcheckoutgoing(pushop):
380 return
394 return
381 pushop.repo.prepushoutgoinghooks(pushop.repo,
395 pushop.repo.prepushoutgoinghooks(pushop.repo,
382 pushop.remote,
396 pushop.remote,
383 pushop.outgoing)
397 pushop.outgoing)
384 if not pushop.force:
398 if not pushop.force:
385 bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
399 bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
386 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
400 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
387 cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
401 cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
388 def handlereply(op):
402 def handlereply(op):
389 """extract addchangroup returns from server reply"""
403 """extract addchangroup returns from server reply"""
390 cgreplies = op.records.getreplies(cgpart.id)
404 cgreplies = op.records.getreplies(cgpart.id)
391 assert len(cgreplies['changegroup']) == 1
405 assert len(cgreplies['changegroup']) == 1
392 pushop.ret = cgreplies['changegroup'][0]['return']
406 pushop.ret = cgreplies['changegroup'][0]['return']
393 return handlereply
407 return handlereply
394
408
395 @b2partsgenerator('phase')
409 @b2partsgenerator('phase')
396 def _pushb2phases(pushop, bundler):
410 def _pushb2phases(pushop, bundler):
397 """handle phase push through bundle2"""
411 """handle phase push through bundle2"""
398 if 'phases' in pushop.stepsdone:
412 if 'phases' in pushop.stepsdone:
399 return
413 return
400 b2caps = bundle2.bundle2caps(pushop.remote)
414 b2caps = bundle2.bundle2caps(pushop.remote)
401 if not 'b2x:pushkey' in b2caps:
415 if not 'b2x:pushkey' in b2caps:
402 return
416 return
403 pushop.stepsdone.add('phases')
417 pushop.stepsdone.add('phases')
404 part2node = []
418 part2node = []
405 enc = pushkey.encode
419 enc = pushkey.encode
406 for newremotehead in pushop.outdatedphases:
420 for newremotehead in pushop.outdatedphases:
407 part = bundler.newpart('b2x:pushkey')
421 part = bundler.newpart('b2x:pushkey')
408 part.addparam('namespace', enc('phases'))
422 part.addparam('namespace', enc('phases'))
409 part.addparam('key', enc(newremotehead.hex()))
423 part.addparam('key', enc(newremotehead.hex()))
410 part.addparam('old', enc(str(phases.draft)))
424 part.addparam('old', enc(str(phases.draft)))
411 part.addparam('new', enc(str(phases.public)))
425 part.addparam('new', enc(str(phases.public)))
412 part2node.append((part.id, newremotehead))
426 part2node.append((part.id, newremotehead))
413 def handlereply(op):
427 def handlereply(op):
414 for partid, node in part2node:
428 for partid, node in part2node:
415 partrep = op.records.getreplies(partid)
429 partrep = op.records.getreplies(partid)
416 results = partrep['pushkey']
430 results = partrep['pushkey']
417 assert len(results) <= 1
431 assert len(results) <= 1
418 msg = None
432 msg = None
419 if not results:
433 if not results:
420 msg = _('server ignored update of %s to public!\n') % node
434 msg = _('server ignored update of %s to public!\n') % node
421 elif not int(results[0]['return']):
435 elif not int(results[0]['return']):
422 msg = _('updating %s to public failed!\n') % node
436 msg = _('updating %s to public failed!\n') % node
423 if msg is not None:
437 if msg is not None:
424 pushop.ui.warn(msg)
438 pushop.ui.warn(msg)
425 return handlereply
439 return handlereply
426
440
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Add one ``b2x:pushkey`` part per outgoing bookmark and return a reply
    handler that reports the outcome of each update to the user.

    (Previous docstring said "phase push"; this generator only deals with
    bookmarks.)
    """
    if 'bookmarks' in pushop.stepsdone:
        # bookmarks were already handled by an earlier mechanism
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        # remote cannot process pushkey parts; leave the step pending so the
        # legacy pushkey path takes over
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # remember which part updates which bookmark for reply processing
        part2book.append((part.id, book))
    def handlereply(op):
        for partid, book in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            # at most one pushkey reply is expected per part
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    pushop.ui.status(_("updating bookmark %s\n") % book)
                else:
                    pushop.ui.warn(_('updating bookmark %s failed!\n') % book)
    return handlereply
459
473
460
474
461 def _pushbundle2(pushop):
475 def _pushbundle2(pushop):
462 """push data to the remote using bundle2
476 """push data to the remote using bundle2
463
477
464 The only currently supported type of data is changegroup but this will
478 The only currently supported type of data is changegroup but this will
465 evolve in the future."""
479 evolve in the future."""
466 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
480 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
467 # create reply capability
481 # create reply capability
468 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
482 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
469 bundler.newpart('b2x:replycaps', data=capsblob)
483 bundler.newpart('b2x:replycaps', data=capsblob)
470 replyhandlers = []
484 replyhandlers = []
471 for partgenname in b2partsgenorder:
485 for partgenname in b2partsgenorder:
472 partgen = b2partsgenmapping[partgenname]
486 partgen = b2partsgenmapping[partgenname]
473 ret = partgen(pushop, bundler)
487 ret = partgen(pushop, bundler)
474 if callable(ret):
488 if callable(ret):
475 replyhandlers.append(ret)
489 replyhandlers.append(ret)
476 # do not push if nothing to push
490 # do not push if nothing to push
477 if bundler.nbparts <= 1:
491 if bundler.nbparts <= 1:
478 return
492 return
479 stream = util.chunkbuffer(bundler.getchunks())
493 stream = util.chunkbuffer(bundler.getchunks())
480 try:
494 try:
481 reply = pushop.remote.unbundle(stream, ['force'], 'push')
495 reply = pushop.remote.unbundle(stream, ['force'], 'push')
482 except error.BundleValueError, exc:
496 except error.BundleValueError, exc:
483 raise util.Abort('missing support for %s' % exc)
497 raise util.Abort('missing support for %s' % exc)
484 try:
498 try:
485 op = bundle2.processbundle(pushop.repo, reply)
499 op = bundle2.processbundle(pushop.repo, reply)
486 except error.BundleValueError, exc:
500 except error.BundleValueError, exc:
487 raise util.Abort('missing support for %s' % exc)
501 raise util.Abort('missing support for %s' % exc)
488 for rephand in replyhandlers:
502 for rephand in replyhandlers:
489 rephand(op)
503 rephand(op)
490
504
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # The fast path may only be taken when pushing everything and nothing is
    # excluded or filtered: no race is then possible on push.
    usefastpath = (pushop.revs is None
                   and not outgoing.excluded
                   and not pushop.repo.changelog.filteredrevs)
    if usefastpath:
        packer = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   packer,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # Local repo finds heads on server, finds out what revs it must
        # push.  Once revs are transferred, if the server finds it has
        # different heads (someone else won a commit/push race), it aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                  pushop.repo.url())
538
552
539 def _pushsyncphase(pushop):
553 def _pushsyncphase(pushop):
540 """synchronise phase information locally and remotely"""
554 """synchronise phase information locally and remotely"""
541 cheads = pushop.commonheads
555 cheads = pushop.commonheads
542 # even when we don't push, exchanging phase data is useful
556 # even when we don't push, exchanging phase data is useful
543 remotephases = pushop.remote.listkeys('phases')
557 remotephases = pushop.remote.listkeys('phases')
544 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
558 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
545 and remotephases # server supports phases
559 and remotephases # server supports phases
546 and pushop.ret is None # nothing was pushed
560 and pushop.ret is None # nothing was pushed
547 and remotephases.get('publishing', False)):
561 and remotephases.get('publishing', False)):
548 # When:
562 # When:
549 # - this is a subrepo push
563 # - this is a subrepo push
550 # - and remote support phase
564 # - and remote support phase
551 # - and no changeset was pushed
565 # - and no changeset was pushed
552 # - and remote is publishing
566 # - and remote is publishing
553 # We may be in issue 3871 case!
567 # We may be in issue 3871 case!
554 # We drop the possible phase synchronisation done by
568 # We drop the possible phase synchronisation done by
555 # courtesy to publish changesets possibly locally draft
569 # courtesy to publish changesets possibly locally draft
556 # on the remote.
570 # on the remote.
557 remotephases = {'publishing': 'True'}
571 remotephases = {'publishing': 'True'}
558 if not remotephases: # old server or public only reply from non-publishing
572 if not remotephases: # old server or public only reply from non-publishing
559 _localphasemove(pushop, cheads)
573 _localphasemove(pushop, cheads)
560 # don't push any phase data as there is nothing to push
574 # don't push any phase data as there is nothing to push
561 else:
575 else:
562 ana = phases.analyzeremotephases(pushop.repo, cheads,
576 ana = phases.analyzeremotephases(pushop.repo, cheads,
563 remotephases)
577 remotephases)
564 pheads, droots = ana
578 pheads, droots = ana
565 ### Apply remote phase on local
579 ### Apply remote phase on local
566 if remotephases.get('publishing', False):
580 if remotephases.get('publishing', False):
567 _localphasemove(pushop, cheads)
581 _localphasemove(pushop, cheads)
568 else: # publish = False
582 else: # publish = False
569 _localphasemove(pushop, pheads)
583 _localphasemove(pushop, pheads)
570 _localphasemove(pushop, cheads, phases.draft)
584 _localphasemove(pushop, cheads, phases.draft)
571 ### Apply local phase on remote
585 ### Apply local phase on remote
572
586
573 if pushop.ret:
587 if pushop.ret:
574 if 'phases' in pushop.stepsdone:
588 if 'phases' in pushop.stepsdone:
575 # phases already pushed though bundle2
589 # phases already pushed though bundle2
576 return
590 return
577 outdated = pushop.outdatedphases
591 outdated = pushop.outdatedphases
578 else:
592 else:
579 outdated = pushop.fallbackoutdatedphases
593 outdated = pushop.fallbackoutdatedphases
580
594
581 pushop.stepsdone.add('phases')
595 pushop.stepsdone.add('phases')
582
596
583 # filter heads already turned public by the push
597 # filter heads already turned public by the push
584 outdated = [c for c in outdated if c.node() not in pheads]
598 outdated = [c for c in outdated if c.node() not in pheads]
585 b2caps = bundle2.bundle2caps(pushop.remote)
599 b2caps = bundle2.bundle2caps(pushop.remote)
586 if 'b2x:pushkey' in b2caps:
600 if 'b2x:pushkey' in b2caps:
587 # server supports bundle2, let's do a batched push through it
601 # server supports bundle2, let's do a batched push through it
588 #
602 #
589 # This will eventually be unified with the changesets bundle2 push
603 # This will eventually be unified with the changesets bundle2 push
590 bundler = bundle2.bundle20(pushop.ui, b2caps)
604 bundler = bundle2.bundle20(pushop.ui, b2caps)
591 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
605 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
592 bundler.newpart('b2x:replycaps', data=capsblob)
606 bundler.newpart('b2x:replycaps', data=capsblob)
593 part2node = []
607 part2node = []
594 enc = pushkey.encode
608 enc = pushkey.encode
595 for newremotehead in outdated:
609 for newremotehead in outdated:
596 part = bundler.newpart('b2x:pushkey')
610 part = bundler.newpart('b2x:pushkey')
597 part.addparam('namespace', enc('phases'))
611 part.addparam('namespace', enc('phases'))
598 part.addparam('key', enc(newremotehead.hex()))
612 part.addparam('key', enc(newremotehead.hex()))
599 part.addparam('old', enc(str(phases.draft)))
613 part.addparam('old', enc(str(phases.draft)))
600 part.addparam('new', enc(str(phases.public)))
614 part.addparam('new', enc(str(phases.public)))
601 part2node.append((part.id, newremotehead))
615 part2node.append((part.id, newremotehead))
602 stream = util.chunkbuffer(bundler.getchunks())
616 stream = util.chunkbuffer(bundler.getchunks())
603 try:
617 try:
604 reply = pushop.remote.unbundle(stream, ['force'], 'push')
618 reply = pushop.remote.unbundle(stream, ['force'], 'push')
605 op = bundle2.processbundle(pushop.repo, reply)
619 op = bundle2.processbundle(pushop.repo, reply)
606 except error.BundleValueError, exc:
620 except error.BundleValueError, exc:
607 raise util.Abort('missing support for %s' % exc)
621 raise util.Abort('missing support for %s' % exc)
608 for partid, node in part2node:
622 for partid, node in part2node:
609 partrep = op.records.getreplies(partid)
623 partrep = op.records.getreplies(partid)
610 results = partrep['pushkey']
624 results = partrep['pushkey']
611 assert len(results) <= 1
625 assert len(results) <= 1
612 msg = None
626 msg = None
613 if not results:
627 if not results:
614 msg = _('server ignored update of %s to public!\n') % node
628 msg = _('server ignored update of %s to public!\n') % node
615 elif not int(results[0]['return']):
629 elif not int(results[0]['return']):
616 msg = _('updating %s to public failed!\n') % node
630 msg = _('updating %s to public failed!\n') % node
617 if msg is not None:
631 if msg is not None:
618 pushop.ui.warn(msg)
632 pushop.ui.warn(msg)
619
633
620 else:
634 else:
621 # fallback to independant pushkey command
635 # fallback to independant pushkey command
622 for newremotehead in outdated:
636 for newremotehead in outdated:
623 r = pushop.remote.pushkey('phases',
637 r = pushop.remote.pushkey('phases',
624 newremotehead.hex(),
638 newremotehead.hex(),
625 str(phases.draft),
639 str(phases.draft),
626 str(phases.public))
640 str(phases.public))
627 if not r:
641 if not r:
628 pushop.ui.warn(_('updating %s to public failed!\n')
642 pushop.ui.warn(_('updating %s to public failed!\n')
629 % newremotehead)
643 % newremotehead)
630
644
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        tr = pushop.repo.transaction('push-phase-sync')
        try:
            phases.advanceboundary(pushop.repo, tr, phase, nodes)
            tr.close()
        finally:
            tr.release()
        return
    # The repo is not locked, so we must not touch any phase.  Inform the
    # user that phases should have been moved when applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
649
663
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        outcomes = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        # reverse sort to ensure we end with dump0
        for key in sorted(remotedata, reverse=True):
            outcomes.append(remote.pushkey('obsolete', key, '',
                                           remotedata[key]))
        if not all(outcomes):
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
668
682
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.ret == 0 or 'bookmarks' in pushop.stepsdone:
        # nothing was pushed, or bundle2 already took care of bookmarks
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote
    for name, oldnode, newnode in pushop.outbookmarks:
        if remote.pushkey('bookmarks', name, oldnode, newnode):
            ui.status(_("updating bookmark %s\n") % name)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % name)
681
695
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry push related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # We pulled a specific subset: sync on this subset.
            return self.heads
        # We pulled everything possible: sync on everything common plus the
        # pulled heads not already known to be common.
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
748
762
def pull(repo, remote, heads=None, force=False):
    """Pull changes from ``remote`` into ``repo``.

    Run discovery, then exchange changesets, phases and obsolescence
    markers — through bundle2 when both sides support it, through the
    individual legacy steps otherwise.  Returns the changegroup result
    code.
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missingreqs = set(pullop.remote.requirements) - pullop.repo.supported
        if missingreqs:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missingreqs)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        # bundle2 handles several steps at once; whatever it did not cover
        # remains in pullop.todosteps for the legacy code paths below
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
777
791
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(),
        pullop.remote,
        heads=pullop.heads,
        force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
788
802
789 def _pullbundle2(pullop):
803 def _pullbundle2(pullop):
790 """pull data using bundle2
804 """pull data using bundle2
791
805
792 For now, the only supported data are changegroup."""
806 For now, the only supported data are changegroup."""
793 remotecaps = bundle2.bundle2caps(pullop.remote)
807 remotecaps = bundle2.bundle2caps(pullop.remote)
794 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
808 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
795 # pulling changegroup
809 # pulling changegroup
796 pullop.todosteps.remove('changegroup')
810 pullop.todosteps.remove('changegroup')
797
811
798 kwargs['common'] = pullop.common
812 kwargs['common'] = pullop.common
799 kwargs['heads'] = pullop.heads or pullop.rheads
813 kwargs['heads'] = pullop.heads or pullop.rheads
800 if 'b2x:listkeys' in remotecaps:
814 if 'b2x:listkeys' in remotecaps:
801 kwargs['listkeys'] = ['phase']
815 kwargs['listkeys'] = ['phase']
802 if not pullop.fetch:
816 if not pullop.fetch:
803 pullop.repo.ui.status(_("no changes found\n"))
817 pullop.repo.ui.status(_("no changes found\n"))
804 pullop.cgresult = 0
818 pullop.cgresult = 0
805 else:
819 else:
806 if pullop.heads is None and list(pullop.common) == [nullid]:
820 if pullop.heads is None and list(pullop.common) == [nullid]:
807 pullop.repo.ui.status(_("requesting all changes\n"))
821 pullop.repo.ui.status(_("requesting all changes\n"))
808 _pullbundle2extraprepare(pullop, kwargs)
822 _pullbundle2extraprepare(pullop, kwargs)
809 if kwargs.keys() == ['format']:
823 if kwargs.keys() == ['format']:
810 return # nothing to pull
824 return # nothing to pull
811 bundle = pullop.remote.getbundle('pull', **kwargs)
825 bundle = pullop.remote.getbundle('pull', **kwargs)
812 try:
826 try:
813 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
827 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
814 except error.BundleValueError, exc:
828 except error.BundleValueError, exc:
815 raise util.Abort('missing support for %s' % exc)
829 raise util.Abort('missing support for %s' % exc)
816
830
817 if pullop.fetch:
831 if pullop.fetch:
818 assert len(op.records['changegroup']) == 1
832 assert len(op.records['changegroup']) == 1
819 pullop.cgresult = op.records['changegroup'][0]['return']
833 pullop.cgresult = op.records['changegroup'][0]['return']
820
834
821 # processing phases change
835 # processing phases change
822 for namespace, value in op.records['listkeys']:
836 for namespace, value in op.records['listkeys']:
823 if namespace == 'phases':
837 if namespace == 'phases':
824 _pullapplyphases(pullop, value)
838 _pullapplyphases(pullop, value)
825
839
826 def _pullbundle2extraprepare(pullop, kwargs):
840 def _pullbundle2extraprepare(pullop, kwargs):
827 """hook function so that extensions can extend the getbundle call"""
841 """hook function so that extensions can extend the getbundle call"""
828 pass
842 pass
829
843
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the best available protocol to fetch the changegroup
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
861
875
def _pullphase(pullop):
    """fetch phase data from the remote and apply it locally"""
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
866
880
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _droots = phases.analyzeremotephases(pullop.repo,
                                                     pullop.pulledsubset,
                                                     remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [node for node in pheads if phase(unfi, rev(node)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [node for node in dheads if phase(unfi, rev(node)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
899
913
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    if not obsolete._enabled:
        # obsolescence feature is off: nothing to fetch, no transaction opened
        return None
    tr = None
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' in remoteobs:
        tr = pullop.gettransaction()
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
        pullop.repo.invalidatevolatilesets()
    return tr
921
935
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise bundle2 support plus the repo's encoded bundle2 capabilities
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG2X', 'bundle2=' + urllib.quote(capsblob)])
928
942
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif 'HG2X' not in bundlecaps:
        # a changegroup-less request only makes sense for bundle2 clients
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # bundle10 (plain changegroup) path: no extra arguments are supported
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    # decode the client's advertised bundle2 capabilities from bundlecaps
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    # one 'b2x:listkeys' part per requested pushkey namespace
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    # extension hook: lets third parties append additional parts
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
975
989
976 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
990 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
977 bundlecaps=None, **kwargs):
991 bundlecaps=None, **kwargs):
978 """hook function to let extensions add parts to the requested bundle"""
992 """hook function to let extensions add parts to the requested bundle"""
979 pass
993 pass
980
994
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    headsdigest = util.sha1(''.join(sorted(currentheads))).digest()
    # the client may send the literal heads, a hash of them, or 'force'
    unchanged = (their_heads == ['force']
                 or their_heads == currentheads
                 or their_heads == ['hashed', headsdigest])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
994
1008
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        # abort with PushRaced if the repo changed since the client
        # computed `heads`
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: process inside a single transaction so all
            # parts are applied atomically
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                # expose pending (not yet committed) changelog data to the
                # pre-close hook via the 'pending' argument
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # flag the error so callers can report bundle2-specific
                # failures back to the client
                exc.duringunbundle2 = True
                raise
        else:
            # plain bundle10 changegroup
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            # release() is a no-op after a successful tr.close(); otherwise
            # it rolls the transaction back
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now