##// END OF EJS Templates
exchange: remove check for 'format' key...
Martin von Zweigbergk -
r24638:13a19717 default
parent child Browse files
Show More
@@ -1,1305 +1,1303 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header on *fh* and return a matching unbundler.

    ``fname`` is used only for error messages (falls back to "stream");
    when ``vfs`` is given the name is joined onto it for reporting.
    Returns a ``changegroup.cg1unpacker`` for HG10 bundles or a
    ``bundle2.unbundle20`` for HG2Y bundles; raises ``util.Abort`` for
    anything else.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        # A raw, headerless changegroup stream: splice the consumed bytes
        # back and treat it as an uncompressed HG10 bundle.
        if header.startswith('\0') and not header.startswith('HG'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[:2]
    version = header[2:]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # compression marker follows the header for on-disk bundles
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version == '2Y':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    # pick the newest marker format both sides understand
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('b2x:obsmarkers', data=stream)
54
54
class pushoperation(object):
    """A object that represent a single push operation

    It purpose is to carry push related state and very common operation.

    A new should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager (set lazily by push() once the local lock is held)
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        # cached: discovery must have filled self.outgoing before first access
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult is truthy on a successful changegroup push; see __init__
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # keyed by action name -> (success message, failure message)
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
163
163
164
164
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing over the filesystem: the destination must understand
        # every requirement of the source repository
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # only open a local transaction when we actually hold the lock
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # each of these steps checks pushop.stepsdone and is a no-op
            # if bundle2 already performed it
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        # commit the local transaction only on success; release() below
        # rolls it back if we never got here
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()

    return pushop
240
240
# ordered names of the discovery steps run before a push
pushdiscoveryorder = []

# step name -> implementing function
#
# kept as a mutable mapping so extensions can wrap individual steps
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator registering a push-discovery step under *stepname*

    Registration order is preserved in ``pushdiscoveryorder`` and is the
    order steps will run in, so decoration order may matter.

    Only use this for brand-new steps; to wrap an existing step from an
    extension, modify ``pushdiscoverymapping`` directly instead.
    """
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
264
264
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
270
270
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed

    Fills ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming`` from the common/incoming discovery results.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
283
283
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # pheads is not used below; only the remote draft roots matter here
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing remote: only locally-public changesets need a
        # phase update on the remote side
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
318
318
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set

    No-op unless marker exchange is enabled, the local obsstore is
    non-empty and the remote advertises the 'obsolete' namespace.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
329
329
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and fill pushop.outbookmarks

    Each entry is a (name, old-remote-id, new-id) triple; '' stands for
    "absent" on either side.  Also sets ``pushop.bkresult`` to 2 when an
    explicitly requested bookmark exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict implicit bookmark moves to the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that moved forward locally: pushed even when not explicit,
    # as long as their target is part of the pushed set
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
380
380
def _pushcheckoutgoing(pushop):
    """validate pushop.outgoing before any changesets are sent

    Returns False (after reporting that no changes were found) when
    there is nothing to push.  Unless --force was given, aborts on
    outgoing obsolete/troubled heads and runs the standard new-head
    check.  Returns True when the push may proceed.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # an empty obsstore means no obsolete changesets at all, so the
        # head scan below can be skipped entirely
        if unfi.obsstore:
            # message variables kept short for the 80-char limit
            obsoletemsg = _("push includes obsolete changeset: %s!")
            troublemsgs = {
                "unstable": _("push includes unstable changeset: %s!"),
                "bumped": _("push includes bumped changeset: %s!"),
                "divergent": _("push includes divergent changeset: %s!"),
            }
            # If any obsolete or unstable changeset is in missing, at
            # least one missing head is obsolete or unstable too, so
            # inspecting heads only is sufficient.
            for head in outgoing.missingheads:
                ctx = unfi[head]
                if ctx.obsolete():
                    raise util.Abort(obsoletemsg % ctx)
                elif ctx.troubled():
                    raise util.Abort(troublemsgs[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
415
415
# ordered names of the part-generation steps for an outgoing bundle2
b2partsgenorder = []

# step name -> generator function
#
# kept as a mutable mapping so extensions can wrap individual steps
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator registering a bundle2 part generator under *stepname*

    Registration order is preserved in ``b2partsgenorder`` and is the
    order generators run in, so decoration order may matter.

    Only use this for brand-new steps; to wrap an existing step from an
    extension, modify ``b2partsgenmapping`` directly instead.
    """
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
439
439
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # the server aborts if its heads changed since discovery
        bundler.newpart('b2x:check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('b2x:changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the newest changegroup version we can both produce
        # and the remote accepts
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('b2x:changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        # the reply is matched back to our part via cgpart.id
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
480
480
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One ``b2x:pushkey`` part is added per remote head that must be turned
    public. Returns a reply handler that warns about heads the server
    ignored or refused to update.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # server lacks bundle2 pushkey support: leave the step for the
    # independent pushkey fallback in _pushsyncphase
    # (idiom fix: use "not in", matching _pushb2bookmarks)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """check the server outcome for each phase update part"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
512
512
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle2 push when possible"""
    # skip if the markers were already handled by another step
    if 'obsmarkers' in pushop.stepsdone:
        return
    # only emit markers when both sides agree on a markers format version
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    markers = pushop.outobsmarkers
    if markers:
        buildobsmarkerspart(bundler, markers)
523
523
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    (docstring fixed: it previously said "phase push", a copy-paste error.)
    One ``b2x:pushkey`` part is emitted per outgoing bookmark; the reply
    handler reports the outcome of each update to the user.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old value: bookmark is new on the remote;
        # empty new value: bookmark is being deleted
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))

    def handlereply(op):
        """report per-bookmark results using the bookmsgmap messages"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                # use the local alias consistently (was pushop.ui.warn)
                ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
566
566
567
567
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback: let the server send a bundle back in its reply; only
    # possible when we hold a transaction to apply it in
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        # a part generator may return a callable to process the server reply
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the replycaps part added above always counts for one)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        # the remote rejected a mandatory part/parameter it does not know
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        # the reply contained a mandatory part/parameter we do not know
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
604
604
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) changegroup push; stores the remote's result in
    ``pushop.cgresult``. No-op when bundle2 already pushed the changesets.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
653
653
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # remote is publishing: everything common is public
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            # nothing was pushed; use the more conservative fallback set
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
709
709
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    trmanager = pushop.trmanager
    if trmanager:
        phases.advanceboundary(pushop.repo, trmanager.transaction(),
                               phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    if wouldmove:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
726
726
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    # markers are transferred through the pushkey protocol, split into
    # several 'dump' keys when too large for a single value
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    replies = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        replies.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(replies):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
745
745
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    # skip on failed changegroup push or when bundle2 already did the job
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # empty old: new on remote ("export"); empty new: removal ("delete")
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
    # discovery can have set the value form invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
767
767
class pulloperation(object):
    """A object that represent a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager (set by pull() before any data transfer)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
824
824
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        # hook 'source' value (e.g. 'pull')
        self.source = source
        # hook 'url' value (remote location)
        self.url = url
        # underlying transaction, created lazily by transaction()
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            repo = self.repo
            # expose pending data to the pre-close hook via 'pending'
            p = lambda: self._tr.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                      **self._tr.hookargs)
            # snapshot hookargs now; the post-close hook runs later
            hookargs = dict(self._tr.hookargs)
            def runhooks():
                repo.hook('b2x-transactionclose', **hookargs)
            # run the post-close hook once the repo lock is released
            self._tr.addpostclose('b2x-hook-transactionclose',
                                  lambda tr: repo._afterlock(runhooks))
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
863
863
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and phases, bookmarks, obsmarkers) from ``remote``

    Returns the ``pulloperation`` object describing what happened.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # pulling from a local peer: refuse if we do not understand all of
        # its repository requirements
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        # the bundle2 path records completed work in pullop.stepsdone, so
        # the legacy steps below become no-ops for data already transferred
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
892
892
# list of steps to perform discovery before pull
# (populated by the pulldiscovery decorator below)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
900
900
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        # a step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
916
916
def _pulldiscovery(pullop):
    """run every registered discovery step, in registration order"""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
922
922
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally (possibly hidden): treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head is already known: nothing left to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
960
960
961 def _pullbundle2(pullop):
961 def _pullbundle2(pullop):
962 """pull data using bundle2
962 """pull data using bundle2
963
963
964 For now, the only supported data are changegroup."""
964 For now, the only supported data are changegroup."""
965 remotecaps = bundle2.bundle2caps(pullop.remote)
965 remotecaps = bundle2.bundle2caps(pullop.remote)
966 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
966 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
967 # pulling changegroup
967 # pulling changegroup
968 pullop.stepsdone.add('changegroup')
968 pullop.stepsdone.add('changegroup')
969
969
970 kwargs['common'] = pullop.common
970 kwargs['common'] = pullop.common
971 kwargs['heads'] = pullop.heads or pullop.rheads
971 kwargs['heads'] = pullop.heads or pullop.rheads
972 kwargs['cg'] = pullop.fetch
972 kwargs['cg'] = pullop.fetch
973 if 'b2x:listkeys' in remotecaps:
973 if 'b2x:listkeys' in remotecaps:
974 kwargs['listkeys'] = ['phase', 'bookmarks']
974 kwargs['listkeys'] = ['phase', 'bookmarks']
975 if not pullop.fetch:
975 if not pullop.fetch:
976 pullop.repo.ui.status(_("no changes found\n"))
976 pullop.repo.ui.status(_("no changes found\n"))
977 pullop.cgresult = 0
977 pullop.cgresult = 0
978 else:
978 else:
979 if pullop.heads is None and list(pullop.common) == [nullid]:
979 if pullop.heads is None and list(pullop.common) == [nullid]:
980 pullop.repo.ui.status(_("requesting all changes\n"))
980 pullop.repo.ui.status(_("requesting all changes\n"))
981 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
981 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
982 remoteversions = bundle2.obsmarkersversion(remotecaps)
982 remoteversions = bundle2.obsmarkersversion(remotecaps)
983 if obsolete.commonversion(remoteversions) is not None:
983 if obsolete.commonversion(remoteversions) is not None:
984 kwargs['obsmarkers'] = True
984 kwargs['obsmarkers'] = True
985 pullop.stepsdone.add('obsmarkers')
985 pullop.stepsdone.add('obsmarkers')
986 _pullbundle2extraprepare(pullop, kwargs)
986 _pullbundle2extraprepare(pullop, kwargs)
987 if kwargs.keys() == ['format']:
988 return # nothing to pull
989 bundle = pullop.remote.getbundle('pull', **kwargs)
987 bundle = pullop.remote.getbundle('pull', **kwargs)
990 try:
988 try:
991 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
989 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
992 except error.BundleValueError, exc:
990 except error.BundleValueError, exc:
993 raise util.Abort('missing support for %s' % exc)
991 raise util.Abort('missing support for %s' % exc)
994
992
995 if pullop.fetch:
993 if pullop.fetch:
996 results = [cg['return'] for cg in op.records['changegroup']]
994 results = [cg['return'] for cg in op.records['changegroup']]
997 pullop.cgresult = changegroup.combineresults(results)
995 pullop.cgresult = changegroup.combineresults(results)
998
996
999 # processing phases change
997 # processing phases change
1000 for namespace, value in op.records['listkeys']:
998 for namespace, value in op.records['listkeys']:
1001 if namespace == 'phases':
999 if namespace == 'phases':
1002 _pullapplyphases(pullop, value)
1000 _pullapplyphases(pullop, value)
1003
1001
1004 # processing bookmark update
1002 # processing bookmark update
1005 for namespace, value in op.records['listkeys']:
1003 for namespace, value in op.records['listkeys']:
1006 if namespace == 'bookmarks':
1004 if namespace == 'bookmarks':
1007 pullop.remotebookmarks = value
1005 pullop.remotebookmarks = value
1008 _pullbookmarks(pullop)
1006 _pullbookmarks(pullop)
1009
1007
1010 def _pullbundle2extraprepare(pullop, kwargs):
1008 def _pullbundle2extraprepare(pullop, kwargs):
1011 """hook function so that extensions can extend the getbundle call"""
1009 """hook function so that extensions can extend the getbundle call"""
1012 pass
1010 pass
1013
1011
1014 def _pullchangeset(pullop):
1012 def _pullchangeset(pullop):
1015 """pull changeset from unbundle into the local repo"""
1013 """pull changeset from unbundle into the local repo"""
1016 # We delay the open of the transaction as late as possible so we
1014 # We delay the open of the transaction as late as possible so we
1017 # don't open transaction for nothing or you break future useful
1015 # don't open transaction for nothing or you break future useful
1018 # rollback call
1016 # rollback call
1019 if 'changegroup' in pullop.stepsdone:
1017 if 'changegroup' in pullop.stepsdone:
1020 return
1018 return
1021 pullop.stepsdone.add('changegroup')
1019 pullop.stepsdone.add('changegroup')
1022 if not pullop.fetch:
1020 if not pullop.fetch:
1023 pullop.repo.ui.status(_("no changes found\n"))
1021 pullop.repo.ui.status(_("no changes found\n"))
1024 pullop.cgresult = 0
1022 pullop.cgresult = 0
1025 return
1023 return
1026 pullop.gettransaction()
1024 pullop.gettransaction()
1027 if pullop.heads is None and list(pullop.common) == [nullid]:
1025 if pullop.heads is None and list(pullop.common) == [nullid]:
1028 pullop.repo.ui.status(_("requesting all changes\n"))
1026 pullop.repo.ui.status(_("requesting all changes\n"))
1029 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1027 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1030 # issue1320, avoid a race if remote changed after discovery
1028 # issue1320, avoid a race if remote changed after discovery
1031 pullop.heads = pullop.rheads
1029 pullop.heads = pullop.rheads
1032
1030
1033 if pullop.remote.capable('getbundle'):
1031 if pullop.remote.capable('getbundle'):
1034 # TODO: get bundlecaps from remote
1032 # TODO: get bundlecaps from remote
1035 cg = pullop.remote.getbundle('pull', common=pullop.common,
1033 cg = pullop.remote.getbundle('pull', common=pullop.common,
1036 heads=pullop.heads or pullop.rheads)
1034 heads=pullop.heads or pullop.rheads)
1037 elif pullop.heads is None:
1035 elif pullop.heads is None:
1038 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1036 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1039 elif not pullop.remote.capable('changegroupsubset'):
1037 elif not pullop.remote.capable('changegroupsubset'):
1040 raise util.Abort(_("partial pull cannot be done because "
1038 raise util.Abort(_("partial pull cannot be done because "
1041 "other repository doesn't support "
1039 "other repository doesn't support "
1042 "changegroupsubset."))
1040 "changegroupsubset."))
1043 else:
1041 else:
1044 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1042 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1045 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1043 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1046 pullop.remote.url())
1044 pullop.remote.url())
1047
1045
1048 def _pullphase(pullop):
1046 def _pullphase(pullop):
1049 # Get remote phases data from remote
1047 # Get remote phases data from remote
1050 if 'phases' in pullop.stepsdone:
1048 if 'phases' in pullop.stepsdone:
1051 return
1049 return
1052 remotephases = pullop.remote.listkeys('phases')
1050 remotephases = pullop.remote.listkeys('phases')
1053 _pullapplyphases(pullop, remotephases)
1051 _pullapplyphases(pullop, remotephases)
1054
1052
1055 def _pullapplyphases(pullop, remotephases):
1053 def _pullapplyphases(pullop, remotephases):
1056 """apply phase movement from observed remote state"""
1054 """apply phase movement from observed remote state"""
1057 if 'phases' in pullop.stepsdone:
1055 if 'phases' in pullop.stepsdone:
1058 return
1056 return
1059 pullop.stepsdone.add('phases')
1057 pullop.stepsdone.add('phases')
1060 publishing = bool(remotephases.get('publishing', False))
1058 publishing = bool(remotephases.get('publishing', False))
1061 if remotephases and not publishing:
1059 if remotephases and not publishing:
1062 # remote is new and unpublishing
1060 # remote is new and unpublishing
1063 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1061 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1064 pullop.pulledsubset,
1062 pullop.pulledsubset,
1065 remotephases)
1063 remotephases)
1066 dheads = pullop.pulledsubset
1064 dheads = pullop.pulledsubset
1067 else:
1065 else:
1068 # Remote is old or publishing all common changesets
1066 # Remote is old or publishing all common changesets
1069 # should be seen as public
1067 # should be seen as public
1070 pheads = pullop.pulledsubset
1068 pheads = pullop.pulledsubset
1071 dheads = []
1069 dheads = []
1072 unfi = pullop.repo.unfiltered()
1070 unfi = pullop.repo.unfiltered()
1073 phase = unfi._phasecache.phase
1071 phase = unfi._phasecache.phase
1074 rev = unfi.changelog.nodemap.get
1072 rev = unfi.changelog.nodemap.get
1075 public = phases.public
1073 public = phases.public
1076 draft = phases.draft
1074 draft = phases.draft
1077
1075
1078 # exclude changesets already public locally and update the others
1076 # exclude changesets already public locally and update the others
1079 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1077 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1080 if pheads:
1078 if pheads:
1081 tr = pullop.gettransaction()
1079 tr = pullop.gettransaction()
1082 phases.advanceboundary(pullop.repo, tr, public, pheads)
1080 phases.advanceboundary(pullop.repo, tr, public, pheads)
1083
1081
1084 # exclude changesets already draft locally and update the others
1082 # exclude changesets already draft locally and update the others
1085 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1083 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1086 if dheads:
1084 if dheads:
1087 tr = pullop.gettransaction()
1085 tr = pullop.gettransaction()
1088 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1086 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1089
1087
1090 def _pullbookmarks(pullop):
1088 def _pullbookmarks(pullop):
1091 """process the remote bookmark information to update the local one"""
1089 """process the remote bookmark information to update the local one"""
1092 if 'bookmarks' in pullop.stepsdone:
1090 if 'bookmarks' in pullop.stepsdone:
1093 return
1091 return
1094 pullop.stepsdone.add('bookmarks')
1092 pullop.stepsdone.add('bookmarks')
1095 repo = pullop.repo
1093 repo = pullop.repo
1096 remotebookmarks = pullop.remotebookmarks
1094 remotebookmarks = pullop.remotebookmarks
1097 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1095 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1098 pullop.remote.url(),
1096 pullop.remote.url(),
1099 pullop.gettransaction,
1097 pullop.gettransaction,
1100 explicit=pullop.explicitbookmarks)
1098 explicit=pullop.explicitbookmarks)
1101
1099
1102 def _pullobsolete(pullop):
1100 def _pullobsolete(pullop):
1103 """utility function to pull obsolete markers from a remote
1101 """utility function to pull obsolete markers from a remote
1104
1102
1105 The `gettransaction` is function that return the pull transaction, creating
1103 The `gettransaction` is function that return the pull transaction, creating
1106 one if necessary. We return the transaction to inform the calling code that
1104 one if necessary. We return the transaction to inform the calling code that
1107 a new transaction have been created (when applicable).
1105 a new transaction have been created (when applicable).
1108
1106
1109 Exists mostly to allow overriding for experimentation purpose"""
1107 Exists mostly to allow overriding for experimentation purpose"""
1110 if 'obsmarkers' in pullop.stepsdone:
1108 if 'obsmarkers' in pullop.stepsdone:
1111 return
1109 return
1112 pullop.stepsdone.add('obsmarkers')
1110 pullop.stepsdone.add('obsmarkers')
1113 tr = None
1111 tr = None
1114 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1112 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1115 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1113 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1116 remoteobs = pullop.remote.listkeys('obsolete')
1114 remoteobs = pullop.remote.listkeys('obsolete')
1117 if 'dump0' in remoteobs:
1115 if 'dump0' in remoteobs:
1118 tr = pullop.gettransaction()
1116 tr = pullop.gettransaction()
1119 for key in sorted(remoteobs, reverse=True):
1117 for key in sorted(remoteobs, reverse=True):
1120 if key.startswith('dump'):
1118 if key.startswith('dump'):
1121 data = base85.b85decode(remoteobs[key])
1119 data = base85.b85decode(remoteobs[key])
1122 pullop.repo.obsstore.mergemarkers(tr, data)
1120 pullop.repo.obsstore.mergemarkers(tr, data)
1123 pullop.repo.invalidatevolatilesets()
1121 pullop.repo.invalidatevolatilesets()
1124 return tr
1122 return tr
1125
1123
1126 def caps20to10(repo):
1124 def caps20to10(repo):
1127 """return a set with appropriate options to use bundle20 during getbundle"""
1125 """return a set with appropriate options to use bundle20 during getbundle"""
1128 caps = set(['HG2Y'])
1126 caps = set(['HG2Y'])
1129 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1127 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1130 caps.add('bundle2=' + urllib.quote(capsblob))
1128 caps.add('bundle2=' + urllib.quote(capsblob))
1131 return caps
1129 return caps
1132
1130
1133 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1131 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1134 getbundle2partsorder = []
1132 getbundle2partsorder = []
1135
1133
1136 # Mapping between step name and function
1134 # Mapping between step name and function
1137 #
1135 #
1138 # This exists to help extensions wrap steps if necessary
1136 # This exists to help extensions wrap steps if necessary
1139 getbundle2partsmapping = {}
1137 getbundle2partsmapping = {}
1140
1138
1141 def getbundle2partsgenerator(stepname):
1139 def getbundle2partsgenerator(stepname):
1142 """decorator for function generating bundle2 part for getbundle
1140 """decorator for function generating bundle2 part for getbundle
1143
1141
1144 The function is added to the step -> function mapping and appended to the
1142 The function is added to the step -> function mapping and appended to the
1145 list of steps. Beware that decorated functions will be added in order
1143 list of steps. Beware that decorated functions will be added in order
1146 (this may matter).
1144 (this may matter).
1147
1145
1148 You can only use this decorator for new steps, if you want to wrap a step
1146 You can only use this decorator for new steps, if you want to wrap a step
1149 from an extension, attack the getbundle2partsmapping dictionary directly."""
1147 from an extension, attack the getbundle2partsmapping dictionary directly."""
1150 def dec(func):
1148 def dec(func):
1151 assert stepname not in getbundle2partsmapping
1149 assert stepname not in getbundle2partsmapping
1152 getbundle2partsmapping[stepname] = func
1150 getbundle2partsmapping[stepname] = func
1153 getbundle2partsorder.append(stepname)
1151 getbundle2partsorder.append(stepname)
1154 return func
1152 return func
1155 return dec
1153 return dec
1156
1154
1157 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1155 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1158 **kwargs):
1156 **kwargs):
1159 """return a full bundle (with potentially multiple kind of parts)
1157 """return a full bundle (with potentially multiple kind of parts)
1160
1158
1161 Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
1159 Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
1162 passed. For now, the bundle can contain only changegroup, but this will
1160 passed. For now, the bundle can contain only changegroup, but this will
1163 changes when more part type will be available for bundle2.
1161 changes when more part type will be available for bundle2.
1164
1162
1165 This is different from changegroup.getchangegroup that only returns an HG10
1163 This is different from changegroup.getchangegroup that only returns an HG10
1166 changegroup bundle. They may eventually get reunited in the future when we
1164 changegroup bundle. They may eventually get reunited in the future when we
1167 have a clearer idea of the API we what to query different data.
1165 have a clearer idea of the API we what to query different data.
1168
1166
1169 The implementation is at a very early stage and will get massive rework
1167 The implementation is at a very early stage and will get massive rework
1170 when the API of bundle is refined.
1168 when the API of bundle is refined.
1171 """
1169 """
1172 # bundle10 case
1170 # bundle10 case
1173 if bundlecaps is None or 'HG2Y' not in bundlecaps:
1171 if bundlecaps is None or 'HG2Y' not in bundlecaps:
1174 if bundlecaps and not kwargs.get('cg', True):
1172 if bundlecaps and not kwargs.get('cg', True):
1175 raise ValueError(_('request for bundle10 must include changegroup'))
1173 raise ValueError(_('request for bundle10 must include changegroup'))
1176
1174
1177 if kwargs:
1175 if kwargs:
1178 raise ValueError(_('unsupported getbundle arguments: %s')
1176 raise ValueError(_('unsupported getbundle arguments: %s')
1179 % ', '.join(sorted(kwargs.keys())))
1177 % ', '.join(sorted(kwargs.keys())))
1180 return changegroup.getchangegroup(repo, source, heads=heads,
1178 return changegroup.getchangegroup(repo, source, heads=heads,
1181 common=common, bundlecaps=bundlecaps)
1179 common=common, bundlecaps=bundlecaps)
1182
1180
1183 # bundle20 case
1181 # bundle20 case
1184 b2caps = {}
1182 b2caps = {}
1185 for bcaps in bundlecaps:
1183 for bcaps in bundlecaps:
1186 if bcaps.startswith('bundle2='):
1184 if bcaps.startswith('bundle2='):
1187 blob = urllib.unquote(bcaps[len('bundle2='):])
1185 blob = urllib.unquote(bcaps[len('bundle2='):])
1188 b2caps.update(bundle2.decodecaps(blob))
1186 b2caps.update(bundle2.decodecaps(blob))
1189 bundler = bundle2.bundle20(repo.ui, b2caps)
1187 bundler = bundle2.bundle20(repo.ui, b2caps)
1190
1188
1191 kwargs['heads'] = heads
1189 kwargs['heads'] = heads
1192 kwargs['common'] = common
1190 kwargs['common'] = common
1193
1191
1194 for name in getbundle2partsorder:
1192 for name in getbundle2partsorder:
1195 func = getbundle2partsmapping[name]
1193 func = getbundle2partsmapping[name]
1196 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1194 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1197 **kwargs)
1195 **kwargs)
1198
1196
1199 return util.chunkbuffer(bundler.getchunks())
1197 return util.chunkbuffer(bundler.getchunks())
1200
1198
1201 @getbundle2partsgenerator('changegroup')
1199 @getbundle2partsgenerator('changegroup')
1202 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1200 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1203 b2caps=None, heads=None, common=None, **kwargs):
1201 b2caps=None, heads=None, common=None, **kwargs):
1204 """add a changegroup part to the requested bundle"""
1202 """add a changegroup part to the requested bundle"""
1205 cg = None
1203 cg = None
1206 if kwargs.get('cg', True):
1204 if kwargs.get('cg', True):
1207 # build changegroup bundle here.
1205 # build changegroup bundle here.
1208 version = None
1206 version = None
1209 cgversions = b2caps.get('b2x:changegroup')
1207 cgversions = b2caps.get('b2x:changegroup')
1210 if not cgversions: # 3.1 and 3.2 ship with an empty value
1208 if not cgversions: # 3.1 and 3.2 ship with an empty value
1211 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1209 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1212 common=common,
1210 common=common,
1213 bundlecaps=bundlecaps)
1211 bundlecaps=bundlecaps)
1214 else:
1212 else:
1215 cgversions = [v for v in cgversions if v in changegroup.packermap]
1213 cgversions = [v for v in cgversions if v in changegroup.packermap]
1216 if not cgversions:
1214 if not cgversions:
1217 raise ValueError(_('no common changegroup version'))
1215 raise ValueError(_('no common changegroup version'))
1218 version = max(cgversions)
1216 version = max(cgversions)
1219 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1217 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1220 common=common,
1218 common=common,
1221 bundlecaps=bundlecaps,
1219 bundlecaps=bundlecaps,
1222 version=version)
1220 version=version)
1223
1221
1224 if cg:
1222 if cg:
1225 part = bundler.newpart('b2x:changegroup', data=cg)
1223 part = bundler.newpart('b2x:changegroup', data=cg)
1226 if version is not None:
1224 if version is not None:
1227 part.addparam('version', version)
1225 part.addparam('version', version)
1228
1226
1229 @getbundle2partsgenerator('listkeys')
1227 @getbundle2partsgenerator('listkeys')
1230 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1228 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1231 b2caps=None, **kwargs):
1229 b2caps=None, **kwargs):
1232 """add parts containing listkeys namespaces to the requested bundle"""
1230 """add parts containing listkeys namespaces to the requested bundle"""
1233 listkeys = kwargs.get('listkeys', ())
1231 listkeys = kwargs.get('listkeys', ())
1234 for namespace in listkeys:
1232 for namespace in listkeys:
1235 part = bundler.newpart('b2x:listkeys')
1233 part = bundler.newpart('b2x:listkeys')
1236 part.addparam('namespace', namespace)
1234 part.addparam('namespace', namespace)
1237 keys = repo.listkeys(namespace).items()
1235 keys = repo.listkeys(namespace).items()
1238 part.data = pushkey.encodekeys(keys)
1236 part.data = pushkey.encodekeys(keys)
1239
1237
1240 @getbundle2partsgenerator('obsmarkers')
1238 @getbundle2partsgenerator('obsmarkers')
1241 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1239 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1242 b2caps=None, heads=None, **kwargs):
1240 b2caps=None, heads=None, **kwargs):
1243 """add an obsolescence markers part to the requested bundle"""
1241 """add an obsolescence markers part to the requested bundle"""
1244 if kwargs.get('obsmarkers', False):
1242 if kwargs.get('obsmarkers', False):
1245 if heads is None:
1243 if heads is None:
1246 heads = repo.heads()
1244 heads = repo.heads()
1247 subset = [c.node() for c in repo.set('::%ln', heads)]
1245 subset = [c.node() for c in repo.set('::%ln', heads)]
1248 markers = repo.obsstore.relevantmarkers(subset)
1246 markers = repo.obsstore.relevantmarkers(subset)
1249 buildobsmarkerspart(bundler, markers)
1247 buildobsmarkerspart(bundler, markers)
1250
1248
1251 def check_heads(repo, their_heads, context):
1249 def check_heads(repo, their_heads, context):
1252 """check if the heads of a repo have been modified
1250 """check if the heads of a repo have been modified
1253
1251
1254 Used by peer for unbundling.
1252 Used by peer for unbundling.
1255 """
1253 """
1256 heads = repo.heads()
1254 heads = repo.heads()
1257 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1255 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1258 if not (their_heads == ['force'] or their_heads == heads or
1256 if not (their_heads == ['force'] or their_heads == heads or
1259 their_heads == ['hashed', heads_hash]):
1257 their_heads == ['hashed', heads_hash]):
1260 # someone else committed/pushed/unbundled while we
1258 # someone else committed/pushed/unbundled while we
1261 # were transferring data
1259 # were transferring data
1262 raise error.PushRaced('repository changed while %s - '
1260 raise error.PushRaced('repository changed while %s - '
1263 'please try again' % context)
1261 'please try again' % context)
1264
1262
1265 def unbundle(repo, cg, heads, source, url):
1263 def unbundle(repo, cg, heads, source, url):
1266 """Apply a bundle to a repo.
1264 """Apply a bundle to a repo.
1267
1265
1268 this function makes sure the repo is locked during the application and have
1266 this function makes sure the repo is locked during the application and have
1269 mechanism to check that no push race occurred between the creation of the
1267 mechanism to check that no push race occurred between the creation of the
1270 bundle and its application.
1268 bundle and its application.
1271
1269
1272 If the push was raced as PushRaced exception is raised."""
1270 If the push was raced as PushRaced exception is raised."""
1273 r = 0
1271 r = 0
1274 # need a transaction when processing a bundle2 stream
1272 # need a transaction when processing a bundle2 stream
1275 tr = None
1273 tr = None
1276 lock = repo.lock()
1274 lock = repo.lock()
1277 try:
1275 try:
1278 check_heads(repo, heads, 'uploading changes')
1276 check_heads(repo, heads, 'uploading changes')
1279 # push can proceed
1277 # push can proceed
1280 if util.safehasattr(cg, 'params'):
1278 if util.safehasattr(cg, 'params'):
1281 try:
1279 try:
1282 tr = repo.transaction('unbundle')
1280 tr = repo.transaction('unbundle')
1283 tr.hookargs['source'] = source
1281 tr.hookargs['source'] = source
1284 tr.hookargs['url'] = url
1282 tr.hookargs['url'] = url
1285 tr.hookargs['bundle2-exp'] = '1'
1283 tr.hookargs['bundle2-exp'] = '1'
1286 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1284 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1287 p = lambda: tr.writepending() and repo.root or ""
1285 p = lambda: tr.writepending() and repo.root or ""
1288 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
1286 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
1289 **tr.hookargs)
1287 **tr.hookargs)
1290 hookargs = dict(tr.hookargs)
1288 hookargs = dict(tr.hookargs)
1291 def runhooks():
1289 def runhooks():
1292 repo.hook('b2x-transactionclose', **hookargs)
1290 repo.hook('b2x-transactionclose', **hookargs)
1293 tr.addpostclose('b2x-hook-transactionclose',
1291 tr.addpostclose('b2x-hook-transactionclose',
1294 lambda tr: repo._afterlock(runhooks))
1292 lambda tr: repo._afterlock(runhooks))
1295 tr.close()
1293 tr.close()
1296 except Exception, exc:
1294 except Exception, exc:
1297 exc.duringunbundle2 = True
1295 exc.duringunbundle2 = True
1298 raise
1296 raise
1299 else:
1297 else:
1300 r = changegroup.addchangegroup(repo, cg, source, url)
1298 r = changegroup.addchangegroup(repo, cg, source, url)
1301 finally:
1299 finally:
1302 if tr is not None:
1300 if tr is not None:
1303 tr.release()
1301 tr.release()
1304 lock.release()
1302 lock.release()
1305 return r
1303 return r
General Comments 0
You need to be logged in to leave comments. Login now