##// END OF EJS Templates
exchange: import bookmarks as bookmod...
Pierre-Yves David -
r22622:ce6b9ede default
parent child Browse files
Show More
@@ -1,1123 +1,1123 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the bundle header on ``fh`` and return a matching unpacker.

    ``fname`` is only used for error reporting; when empty, the data is
    assumed to come from a stream. When ``vfs`` is provided, ``fname`` is
    joined to the vfs root for nicer error messages.

    Raises util.Abort on an unrecognized magic or bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if header.startswith('\0') and not header.startswith('HG'):
            # headerless changegroup coming from a stream: wrap it so it
            # reads back as an uncompressed HG10 bundle
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression algorithm is stored right after the header
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        # nothing to encode, do not emit an empty part
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('B2X:OBSMARKERS', data=stream)
54
54
class pushoperation(object):
    """State holder for a single push operation.

    Carries push-related state and very common helpers. A fresh instance
    should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        self.repo = repo              # repo we push from
        self.ui = repo.ui
        self.remote = remote          # repo we push to
        self.force = force            # force option provided
        self.revs = revs              # revs to be pushed (None is "all")
        self.newbranch = newbranch    # allow push of new branch
        self.locallocked = None       # did a local lock get acquired?
        # names of steps already performed
        # (used to check what steps have been already performed through
        # bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phase changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        self.outobsmarkers = set()    # outgoing obsmarkers
        self.outbookmarks = []        # outgoing bookmarks

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads
147
147
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # pushing to a local repo: make sure it supports all our
        # repository requirements before doing any work
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    # NOTE: modernized from the Python-2-only "except IOError, err:" comma
    # syntax; "as" is valid on Python 2.6+ and required on Python 3.
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # remote cannot apply a bundle on its own: we must lock it
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # the following steps are no-ops for the parts already
            # handled by bundle2 (tracked through pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop
215
215
# Ordered list of discovery step names to run before a push.
pushdiscoveryorder = []

# Mapping between step name and implementing function.
#
# Kept separate from the ordered list so extensions can wrap an
# individual step by replacing its entry here.
pushdiscoverymapping = {}
223
223
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the ordered list of steps, so decoration order
    may matter.

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # refuse to silently overwrite an already registered step
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
239
239
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
245
245
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
259
259
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    pheads, droots = phases.analyzeremotephases(pushop.repo,
                                                pushop.fallbackheads,
                                                remotephases)
    extracond = '' if publishing else ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
294
294
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set"""
    # guards mirror the original short-circuit order: the remote listkeys
    # call only happens when evolution is enabled and markers exist locally
    if not obsolete._enabled:
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
305
305
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmarks that need to be pushed"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # restrict candidates to ancestors of the pushed revs, when given
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    comparison = bookmod.compare(repo, repo._bookmarks, remotebookmark,
                                 srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comparison
    # only bookmarks that advanced locally are pushed
    for b, scid, dcid in advsrc:
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
323
323
def _pushcheckoutgoing(pushop):
    """validate the outgoing set before pushing

    Returns False when there is nothing to push, True otherwise.
    Aborts when the push would publish obsolete/troubled changesets or
    create unwanted new heads (unless --force).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst) % (ctx.troubles()[0], ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
362
362
# Ordered list of step names used to build an outgoing bundle2 — order
# matters.
b2partsgenorder = []

# Mapping between step name and implementing function.
#
# Kept separate from the ordered list so extensions can wrap an
# individual step by replacing its entry here.
b2partsgenmapping = {}
370
370
def b2partsgenerator(stepname):
    """decorator registering a bundle2 part generation step

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the ordered list of steps, so decoration order
    may matter.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # refuse to silently overwrite an already registered step
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
386
386
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        # nothing to send
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())

    def handlereply(op):
        """extract addchangroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
412
412
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one ``b2x:pushkey`` part per remote head that must turn public
    and returns a reply handler that warns about any update the server
    ignored or refused.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic "not in" (was "not 'b2x:pushkey' in b2caps"), consistent
    # with the bookmarks sibling step
    if 'b2x:pushkey' not in b2caps:
        # server cannot handle pushkey through bundle2; leave the step
        # unmarked so the legacy path performs the phase sync
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
444
444
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part to the bundle when possible"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format in common with the remote; leave the step
        # unmarked so the legacy exchange can handle it
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
455
455
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one ``b2x:pushkey`` part per outgoing bookmark and returns a reply
    handler that reports success or failure for each of them."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        # remote cannot handle pushkey parts; the non-bundle2 code path
        # (_pushbookmark) will take care of bookmarks
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # remember part id so the reply handler can match replies to books
        part2book.append((part.id, book))
    def handlereply(op):
        # inspect the server reply for each pushkey part we emitted
        for partid, book in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    pushop.ui.status(_("updating bookmark %s\n") % book)
                else:
                    pushop.ui.warn(_('updating bookmark %s failed!\n') % book)
    return handlereply
488
488
489
489
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The bundle content is built by the registered ``b2partsgenerator``
    functions (changegroup, phases, bookmarks, obsmarkers, ...); each
    generator may return a callable used to process the server reply."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            # the generator wants to post-process the server reply
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        # only the replycaps part was added: the bundle carries no payload
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
519
519
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    The result of the remote operation is stored in ``pushop.cgresult``."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
568
568
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote phase state on the local repo, then pushes the
    local phase movements to the remote (through bundle2 pushkey parts when
    available, otherwise through individual pushkey commands)."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independant pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
660
660
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if not pushop.locallocked:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        phasestr = phases.phasenames[phase]
        moved = [n for n in nodes if phase < pushop.repo[n].phase()]
        if moved:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
        return
    # local lock is held: perform the phase movement inside a transaction
    tr = pushop.repo.transaction('push-phase-sync')
    try:
        phases.advanceboundary(pushop.repo, tr, phase, nodes)
        tr.close()
    finally:
        tr.release()
679
679
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    # reverse sort to ensure we end with dump0
    outcomes = [remote.pushkey('obsolete', key, '', remotedata[key])
                for key in sorted(remotedata, reverse=True)]
    if not all(outcomes):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
698
698
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if 'bookmarks' in pushop.stepsdone or pushop.cgresult == 0:
        # already handled (e.g. by bundle2) or the changeset push failed
        return
    pushop.stepsdone.add('bookmarks')
    for name, oldnode, newnode in pushop.outbookmarks:
        ok = pushop.remote.pushkey('bookmarks', name, oldnode, newnode)
        if ok:
            pushop.ui.status(_("updating bookmark %s\n") % name)
        else:
            pushop.ui.warn(_('updating bookmark %s failed!\n') % name)
711
711
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
778
778
def pull(repo, remote, heads=None, force=False):
    """pull changesets (and related data) from ``remote`` into ``repo``

    Runs discovery, then lets bundle2 handle whatever steps it can; any
    step left in ``pullop.todosteps`` falls back to the legacy protocol.
    Returns the changegroup result code (``pullop.cgresult``)."""
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # legacy fallbacks for every step bundle2 did not cover
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
807
807
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently only performs changeset discovery; this is expected to grow
    into handling all discovery at some point. Fills in ``pullop.common``,
    ``pullop.fetch`` and ``pullop.rheads``."""
    unfi = pullop.repo.unfiltered()
    common, fetch, rheads = discovery.findcommonincoming(unfi,
                                                         pullop.remote,
                                                         heads=pullop.heads,
                                                         force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
818
818
819 def _pullbundle2(pullop):
819 def _pullbundle2(pullop):
820 """pull data using bundle2
820 """pull data using bundle2
821
821
822 For now, the only supported data are changegroup."""
822 For now, the only supported data are changegroup."""
823 remotecaps = bundle2.bundle2caps(pullop.remote)
823 remotecaps = bundle2.bundle2caps(pullop.remote)
824 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
824 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
825 # pulling changegroup
825 # pulling changegroup
826 pullop.todosteps.remove('changegroup')
826 pullop.todosteps.remove('changegroup')
827
827
828 kwargs['common'] = pullop.common
828 kwargs['common'] = pullop.common
829 kwargs['heads'] = pullop.heads or pullop.rheads
829 kwargs['heads'] = pullop.heads or pullop.rheads
830 kwargs['cg'] = pullop.fetch
830 kwargs['cg'] = pullop.fetch
831 if 'b2x:listkeys' in remotecaps:
831 if 'b2x:listkeys' in remotecaps:
832 kwargs['listkeys'] = ['phase']
832 kwargs['listkeys'] = ['phase']
833 if not pullop.fetch:
833 if not pullop.fetch:
834 pullop.repo.ui.status(_("no changes found\n"))
834 pullop.repo.ui.status(_("no changes found\n"))
835 pullop.cgresult = 0
835 pullop.cgresult = 0
836 else:
836 else:
837 if pullop.heads is None and list(pullop.common) == [nullid]:
837 if pullop.heads is None and list(pullop.common) == [nullid]:
838 pullop.repo.ui.status(_("requesting all changes\n"))
838 pullop.repo.ui.status(_("requesting all changes\n"))
839 if obsolete._enabled:
839 if obsolete._enabled:
840 remoteversions = bundle2.obsmarkersversion(remotecaps)
840 remoteversions = bundle2.obsmarkersversion(remotecaps)
841 if obsolete.commonversion(remoteversions) is not None:
841 if obsolete.commonversion(remoteversions) is not None:
842 kwargs['obsmarkers'] = True
842 kwargs['obsmarkers'] = True
843 pullop.todosteps.remove('obsmarkers')
843 pullop.todosteps.remove('obsmarkers')
844 _pullbundle2extraprepare(pullop, kwargs)
844 _pullbundle2extraprepare(pullop, kwargs)
845 if kwargs.keys() == ['format']:
845 if kwargs.keys() == ['format']:
846 return # nothing to pull
846 return # nothing to pull
847 bundle = pullop.remote.getbundle('pull', **kwargs)
847 bundle = pullop.remote.getbundle('pull', **kwargs)
848 try:
848 try:
849 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
849 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
850 except error.BundleValueError, exc:
850 except error.BundleValueError, exc:
851 raise util.Abort('missing support for %s' % exc)
851 raise util.Abort('missing support for %s' % exc)
852
852
853 if pullop.fetch:
853 if pullop.fetch:
854 assert len(op.records['changegroup']) == 1
854 assert len(op.records['changegroup']) == 1
855 pullop.cgresult = op.records['changegroup'][0]['return']
855 pullop.cgresult = op.records['changegroup'][0]['return']
856
856
857 # processing phases change
857 # processing phases change
858 for namespace, value in op.records['listkeys']:
858 for namespace, value in op.records['listkeys']:
859 if namespace == 'phases':
859 if namespace == 'phases':
860 _pullapplyphases(pullop, value)
860 _pullapplyphases(pullop, value)
861
861
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    Called with the keyword arguments about to be passed to
    ``remote.getbundle``; extensions wrap this to inject extra ones."""
    pass
865
865
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Stores the result code in ``pullop.cgresult``."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the wire command supported by the remote, newest first
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
897
897
def _pullphase(pullop):
    """fetch the remote phase state and apply it locally"""
    # Get remote phases data from remote
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
902
902
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the dict obtained from ``listkeys('phases')``."""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # local aliases for repeated lookups below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
935
935
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` attribute of `pullop` is a function returning the
    pull transaction, creating one if necessary. We return that transaction
    to inform the calling code that a new transaction may have been created
    (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if not obsolete._enabled:
        # obsolescence is experimental and off: nothing to fetch
        return tr
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' in remoteobs:
        tr = pullop.gettransaction()
        # process dump keys newest-first; each holds base85-encoded markers
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                markerdata = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, markerdata)
        pullop.repo.invalidatevolatilesets()
    return tr
957
957
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise the bundle2 format marker plus this repo's url-quoted
    # bundle2 capability blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG2X', 'bundle2=' + urllib.quote(capsblob)])
964
964
# Names of the steps to perform when building a bundle2 for getbundle, in
# the order they must run (order matters).
getbundle2partsorder = []

# Maps each step name to the function generating its part.
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
972
972
def getbundle2partsgenerator(stepname):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is recorded under `stepname` in the
    step -> function mapping, and `stepname` is appended to the ordered
    list of steps. Beware that decoration order therefore determines
    execution order (this may matter).

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def register(func):
        # refuse to silently overwrite an already-registered step
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return register
988
988
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to use to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # bundle10 case: nothing but a changegroup can be transferred
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common,
                                          bundlecaps=bundlecaps)

    # bundle20 case: decode the bundle2 capabilities the client advertised
    b2caps = {}
    for cap in bundlecaps:
        if cap.startswith('bundle2='):
            blob = urllib.unquote(cap[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # run every registered part generator, in registration order
    kwargs['heads'] = heads
    kwargs['common'] = common
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1031
1031
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        # the caller explicitly opted out of a changegroup
        return
    # build changegroup bundle here.
    cg = changegroup.getchangegroup(repo, source, heads=heads,
                                    common=common, bundlecaps=bundlecaps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
1044
1044
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one part per requested pushkey namespace, payload pushkey-encoded
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1055
1055
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the requested heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    buildobsmarkerspart(bundler, repo.obsstore.relevantmarkers(subset))
1066
1066
@getbundle2partsgenerator('extra')
def _getbundleextrapart(bundler, repo, source, bundlecaps=None,
                        b2caps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
    # intentionally a no-op: extensions wrap this to append their own parts
1072
1072
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.

    Raises error.PushRaced when the local heads no longer match what the
    remote side observed (plain list, 'hashed' digest, or 'force')."""
    current = repo.heads()
    current_hash = util.sha1(''.join(sorted(current))).digest()
    if (their_heads == ['force'] or their_heads == current or
            their_heads == ['hashed', current_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1086
1086
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and
    have mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced a PushRaced exception is raised."""
    r = 0
    # a bundle2 stream needs an explicit transaction; plain changegroups
    # manage their own
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not util.safehasattr(cg, 'params'):
            # plain changegroup (bundle10)
            r = changegroup.addchangegroup(repo, cg, source, url)
        else:
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                # expose pending changes to the pre-close hook when any exist
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True,
                          source=source, url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception as exc:
                # mark the failure as bundle2-specific so the remote end
                # can report it accordingly
                exc.duringunbundle2 = True
                raise
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now