##// END OF EJS Templates
bundle2: separate bundle10 and bundle2 cases in getbundle()...
Mike Hommey -
r22542:6b180a0c default
parent child Browse files
Show More
@@ -1,1078 +1,1120 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the bundle header on *fh* and return the matching unbundler.

    ``fname`` is used for error messages only; when empty it defaults to
    "stream".  When ``vfs`` is given, ``fname`` is joined against it for
    error reporting.  Returns a ``changegroup.cg1unpacker`` for HG10
    bundles or a ``bundle2.unbundle20`` for HG2X bundles.  Raises
    ``util.Abort`` for non-bundle data or an unknown bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A stream starting with a NUL byte is a headerless, uncompressed
        # HG10 changegroup: push the consumed bytes back and synthesize
        # the header/compression info.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression algorithm is the two bytes following "HG10"
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    # Nothing to encode: report that no part was added.
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('B2X:OBSMARKERS', data=stream)
54
54
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.ret:
            return self.futureheads
        else:
            return self.fallbackheads
147
147
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # local-to-local push: refuse when the destination repo cannot
        # handle one of our repository requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must hold the remote lock ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            # bundle2 push is experimental and opt-in on both sides
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # the _push* steps below are no-ops when bundle2 already
            # performed them (tracked through pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop.ret
215
215
# list of steps to perform discovery before push, order matters
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
223
223
def pushdiscovery(stepname):
    """decorator for a function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps.  Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        # refuse silent overwrite of an existing step
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
239
239
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
245
245
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed

    Fills ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming`` for use by the later push steps."""
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
259
259
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing server: only already-public changesets are
        # candidates for being turned public remotely
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
294
294
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push"""
    # only when evolution is enabled, the local store has markers and the
    # remote advertises the 'obsolete' pushkey namespace
    if (obsolete._enabled
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repo.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
305
305
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmarks that need to be pushed

    Appends (name, remote-node, local-node) tuples to
    ``pushop.outbookmarks`` for bookmarks the remote can fast-forward."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark updates to the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    comp = bookmarks.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
    # only bookmarks advanced locally (advsrc) are pushed
    for b, scid, dcid in advsrc:
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
323
323
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before push

    Returns False when there is nothing to push.  Unless ``pushop.force``
    is set, aborts on obsolete/troubled outgoing heads and runs the
    standard new-head checks."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
362
362
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
370
370
def b2partsgenerator(stepname):
    """decorator for a function generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps.  Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        # refuse silent overwrite of an existing step
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return dec
386
386
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply
412
412
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one ``b2x:pushkey`` part per outdated remote head, asking the
    server to move it from draft to public.  Returns a reply handler that
    warns about ignored or failed phase updates."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic membership test, consistent with _pushb2bookmarks
    if 'b2x:pushkey' not in b2caps:
        # server cannot do pushkey over bundle2; leave the step to the
        # legacy pushkey-based phase synchronisation
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """check server replies and warn on ignored/failed phase updates"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
444
444
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """handle obsolescence marker push through bundle2"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format shared with the remote; leave the step to the
        # legacy pushkey-based obsmarker exchange
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
455
455
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one 'b2x:pushkey' part per outgoing bookmark and returns a reply
    handler that reports each update's outcome to the user."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        # server cannot process pushkey parts; fall back to legacy pushkey
        return
    pushop.stepsdone.add('bookmarks')
    # remember (part id, bookmark name) so the reply handler can match
    # server replies back to bookmarks
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        part2book.append((part.id, book))
    def handlereply(op):
        # inspect the server's reply for each pushkey part we emitted
        for partid, book in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    pushop.ui.status(_("updating bookmark %s\n") % book)
                else:
                    pushop.ui.warn(_('updating bookmark %s failed!\n') % book)
    return handlereply
488
488
489
489
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    # let every registered parts generator add its parts; a generator may
    # hand back a callable used to process the server's reply bundle
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        # only the replycaps part is present: nothing to send
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    # let each part generator inspect the replies it asked for
    for rephand in replyhandlers:
        rephand(op)
519
519
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Skips when a previous step already sent changesets or when there is
    nothing outgoing. Stores the remote's unbundle/addchangegroup result
    in ``pushop.ret``."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        # partial push (explicit revs, exclusions or filtered revs):
        # take the slower, outgoing-based path
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
567
567
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Reads the remote's 'phases' listkeys, moves local phases to match,
    then pushes outdated phase updates back to the remote — batched
    through bundle2 'b2x:pushkey' parts when the server supports it,
    otherwise one pushkey call per head."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common is public locally too
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

    if pushop.ret:
        if 'phases' in pushop.stepsdone:
            # phases already pushed through bundle2
            return
        outdated = pushop.outdatedphases
    else:
        outdated = pushop.fallbackoutdatedphases

    pushop.stepsdone.add('phases')

    # filter heads already turned public by the push
    # NOTE(review): 'pheads' is only bound in the else-branch above; if
    # 'remotephases' is empty and execution reaches this line it raises
    # NameError — confirm that path is unreachable in practice.
    outdated = [c for c in outdated if c.node() not in pheads]
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' in b2caps:
        # server supports bundle2, let's do a batched push through it
        #
        # This will eventually be unified with the changesets bundle2 push
        bundler = bundle2.bundle20(pushop.ui, b2caps)
        capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
        bundler.newpart('b2x:replycaps', data=capsblob)
        # remember (part id, head) pairs to match server replies below
        part2node = []
        enc = pushkey.encode
        for newremotehead in outdated:
            part = bundler.newpart('b2x:pushkey')
            part.addparam('namespace', enc('phases'))
            part.addparam('key', enc(newremotehead.hex()))
            part.addparam('old', enc(str(phases.draft)))
            part.addparam('new', enc(str(phases.public)))
            part2node.append((part.id, newremotehead))
        stream = util.chunkbuffer(bundler.getchunks())
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
            op = bundle2.processbundle(pushop.repo, reply)
        except error.BundleValueError, exc:
            raise util.Abort('missing support for %s' % exc)
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    else:
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
659
659
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if not pushop.locallocked:
        # repo is not locked, do not change any phases!
        # Inform the user when phases would actually have moved.
        wouldmove = [n for n in nodes if phase < pushop.repo[n].phase()]
        if wouldmove:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n')
                             % phases.phasenames[phase])
        return
    tr = pushop.repo.transaction('push-phase-sync')
    try:
        phases.advanceboundary(pushop.repo, tr, phase, nodes)
        tr.close()
    finally:
        tr.release()
678
678
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        # reverse sort to ensure we end with dump0
        outcomes = [remote.pushkey('obsolete', key, '', remotedata[key])
                    for key in sorted(remotedata, reverse=True)]
        if not all(outcomes):
            repo.ui.warn(_('failed to push some obsolete markers!\n'))
697
697
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    # skip when the changeset push failed or bookmarks were already
    # handled (e.g. through bundle2)
    if pushop.ret == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote
    for book, oldnode, newnode in pushop.outbookmarks:
        if remote.pushkey('bookmarks', book, oldnode, newnode):
            ui.status(_("updating bookmark %s\n") % book)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % book)
710
710
class pulloperation(object):
    """State holder for a single pull operation.

    Carries pull-related state and the common helpers operating on it.
    A fresh instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None means "everything")
        self.heads = heads
        # whether the pull is forced
        self.force = force
        # name used for the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # transaction, created lazily by gettransaction()
        self._tr = None
        # changesets common to local and remote before the pull
        self.common = None
        # heads actually pulled
        self.rheads = None
        # missing changesets to fetch remotely
        self.fetch = None
        # changegroup pulling result (used as return code by pull)
        self.cgresult = None
        # steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is not None:
            # a specific subset was pulled: sync on that subset
            return self.heads
        # everything possible was pulled: sync on all common nodes plus
        # every pulled remote head not already in common
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
777
777
def pull(repo, remote, heads=None, force=False):
    """pull changesets (and related data) from *remote* into *repo*

    Uses bundle2 when both the local 'experimental.bundle2-exp' config
    and the remote's 'bundle2-exp' capability allow it; any step bundle2
    did not handle falls back to the legacy protocols. Returns the
    changegroup result code (``pullop.cgresult``)."""
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # pulling from a local repo: make sure we support its requirements
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # each _pull* helper removes its step from todosteps once handled,
        # so only the steps bundle2 skipped run below
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
806
806
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle
    all discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(),
        pullop.remote,
        heads=pullop.heads,
        force=pullop.force)
    # record the discovery outcome on the operation object
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
817
817
818 def _pullbundle2(pullop):
818 def _pullbundle2(pullop):
819 """pull data using bundle2
819 """pull data using bundle2
820
820
821 For now, the only supported data are changegroup."""
821 For now, the only supported data are changegroup."""
822 remotecaps = bundle2.bundle2caps(pullop.remote)
822 remotecaps = bundle2.bundle2caps(pullop.remote)
823 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
823 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
824 # pulling changegroup
824 # pulling changegroup
825 pullop.todosteps.remove('changegroup')
825 pullop.todosteps.remove('changegroup')
826
826
827 kwargs['common'] = pullop.common
827 kwargs['common'] = pullop.common
828 kwargs['heads'] = pullop.heads or pullop.rheads
828 kwargs['heads'] = pullop.heads or pullop.rheads
829 kwargs['cg'] = pullop.fetch
829 kwargs['cg'] = pullop.fetch
830 if 'b2x:listkeys' in remotecaps:
830 if 'b2x:listkeys' in remotecaps:
831 kwargs['listkeys'] = ['phase']
831 kwargs['listkeys'] = ['phase']
832 if not pullop.fetch:
832 if not pullop.fetch:
833 pullop.repo.ui.status(_("no changes found\n"))
833 pullop.repo.ui.status(_("no changes found\n"))
834 pullop.cgresult = 0
834 pullop.cgresult = 0
835 else:
835 else:
836 if pullop.heads is None and list(pullop.common) == [nullid]:
836 if pullop.heads is None and list(pullop.common) == [nullid]:
837 pullop.repo.ui.status(_("requesting all changes\n"))
837 pullop.repo.ui.status(_("requesting all changes\n"))
838 if obsolete._enabled:
838 if obsolete._enabled:
839 remoteversions = bundle2.obsmarkersversion(remotecaps)
839 remoteversions = bundle2.obsmarkersversion(remotecaps)
840 if obsolete.commonversion(remoteversions) is not None:
840 if obsolete.commonversion(remoteversions) is not None:
841 kwargs['obsmarkers'] = True
841 kwargs['obsmarkers'] = True
842 pullop.todosteps.remove('obsmarkers')
842 pullop.todosteps.remove('obsmarkers')
843 _pullbundle2extraprepare(pullop, kwargs)
843 _pullbundle2extraprepare(pullop, kwargs)
844 if kwargs.keys() == ['format']:
844 if kwargs.keys() == ['format']:
845 return # nothing to pull
845 return # nothing to pull
846 bundle = pullop.remote.getbundle('pull', **kwargs)
846 bundle = pullop.remote.getbundle('pull', **kwargs)
847 try:
847 try:
848 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
848 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
849 except error.BundleValueError, exc:
849 except error.BundleValueError, exc:
850 raise util.Abort('missing support for %s' % exc)
850 raise util.Abort('missing support for %s' % exc)
851
851
852 if pullop.fetch:
852 if pullop.fetch:
853 assert len(op.records['changegroup']) == 1
853 assert len(op.records['changegroup']) == 1
854 pullop.cgresult = op.records['changegroup'][0]['return']
854 pullop.cgresult = op.records['changegroup'][0]['return']
855
855
856 # processing phases change
856 # processing phases change
857 for namespace, value in op.records['listkeys']:
857 for namespace, value in op.records['listkeys']:
858 if namespace == 'phases':
858 if namespace == 'phases':
859 _pullapplyphases(pullop, value)
859 _pullapplyphases(pullop, value)
860
860
861 def _pullbundle2extraprepare(pullop, kwargs):
861 def _pullbundle2extraprepare(pullop, kwargs):
862 """hook function so that extensions can extend the getbundle call"""
862 """hook function so that extensions can extend the getbundle call"""
863 pass
863 pass
864
864
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Picks the richest protocol the remote supports (getbundle,
    changegroupsubset, plain changegroup), applies the result locally and
    stores the outcome in ``pullop.cgresult``."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # remote lacks getbundle and we pull everything missing
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
896
896
def _pullphase(pullop):
    """fetch the remote's phase data and apply it locally"""
    # ask the remote for its current phase information, then delegate the
    # local phase movement to _pullapplyphases
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
901
901
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the remote's 'phases' listkeys mapping. Local
    phases only ever advance (draft -> public); they are never moved
    backwards here."""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind lookups once for the comprehensions below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
934
934
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # only open a transaction when there is actually marker data;
        # 'dump0' is the first chunk key of the pushkey-encoded markers
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
        pullop.repo.invalidatevolatilesets()
    return tr
956
956
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG2X'])
    # advertise the repo's bundle2 capabilities, URL-quoted so the blob
    # survives being transported as a plain capability string
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps
963
963
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def dec(func):
        # each step name may only be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return dec
987
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # a bundle10 is nothing but a changegroup, so the changegroup
        # cannot be opted out of
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # delegate part generation to the registered step functions, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, heads=heads, common=common,
             bundlecaps=bundlecaps, b2caps=b2caps, **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1028
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, heads=None, common=None,
                              bundlecaps=None, b2caps=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    # the client may explicitly opt out of the changegroup with cg=False
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getchangegroup(repo, source, heads=heads,
                                        common=common, bundlecaps=bundlecaps)

    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
1041
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, heads=None, common=None,
                            bundlecaps=None, b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'b2x:listkeys' part per namespace requested by the client
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1052
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, heads=None, common=None,
                            bundlecaps=None, b2caps=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # only markers relevant to the ancestors of the requested heads
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        buildobsmarkerspart(bundler, markers)
1063
@getbundle2partsgenerator('extra')
def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, b2caps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
    # intentionally empty: extensions wrap this via getbundle2partsmapping
    pass
1027
1069
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.

    `their_heads` is either the literal ['force'], the exact list of head
    nodes the client saw, or ['hashed', <sha1-of-sorted-heads>].  Raises
    error.PushRaced when the local heads no longer match.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1041
1083
1042 def unbundle(repo, cg, heads, source, url):
1084 def unbundle(repo, cg, heads, source, url):
1043 """Apply a bundle to a repo.
1085 """Apply a bundle to a repo.
1044
1086
1045 this function makes sure the repo is locked during the application and have
1087 this function makes sure the repo is locked during the application and have
1046 mechanism to check that no push race occurred between the creation of the
1088 mechanism to check that no push race occurred between the creation of the
1047 bundle and its application.
1089 bundle and its application.
1048
1090
1049 If the push was raced as PushRaced exception is raised."""
1091 If the push was raced as PushRaced exception is raised."""
1050 r = 0
1092 r = 0
1051 # need a transaction when processing a bundle2 stream
1093 # need a transaction when processing a bundle2 stream
1052 tr = None
1094 tr = None
1053 lock = repo.lock()
1095 lock = repo.lock()
1054 try:
1096 try:
1055 check_heads(repo, heads, 'uploading changes')
1097 check_heads(repo, heads, 'uploading changes')
1056 # push can proceed
1098 # push can proceed
1057 if util.safehasattr(cg, 'params'):
1099 if util.safehasattr(cg, 'params'):
1058 try:
1100 try:
1059 tr = repo.transaction('unbundle')
1101 tr = repo.transaction('unbundle')
1060 tr.hookargs['bundle2-exp'] = '1'
1102 tr.hookargs['bundle2-exp'] = '1'
1061 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1103 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1062 cl = repo.unfiltered().changelog
1104 cl = repo.unfiltered().changelog
1063 p = cl.writepending() and repo.root or ""
1105 p = cl.writepending() and repo.root or ""
1064 repo.hook('b2x-pretransactionclose', throw=True, source=source,
1106 repo.hook('b2x-pretransactionclose', throw=True, source=source,
1065 url=url, pending=p, **tr.hookargs)
1107 url=url, pending=p, **tr.hookargs)
1066 tr.close()
1108 tr.close()
1067 repo.hook('b2x-transactionclose', source=source, url=url,
1109 repo.hook('b2x-transactionclose', source=source, url=url,
1068 **tr.hookargs)
1110 **tr.hookargs)
1069 except Exception, exc:
1111 except Exception, exc:
1070 exc.duringunbundle2 = True
1112 exc.duringunbundle2 = True
1071 raise
1113 raise
1072 else:
1114 else:
1073 r = changegroup.addchangegroup(repo, cg, source, url)
1115 r = changegroup.addchangegroup(repo, cg, source, url)
1074 finally:
1116 finally:
1075 if tr is not None:
1117 if tr is not None:
1076 tr.release()
1118 tr.release()
1077 lock.release()
1119 lock.release()
1078 return r
1120 return r
General Comments 0
You need to be logged in to leave comments. Login now