##// END OF EJS Templates
bundle2: drop the experimental hooks...
Pierre-Yves David -
r24697:52ff737c default
parent child Browse files
Show More
@@ -1,1311 +1,1294 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the header of ``fh`` and return a matching unbundler.

    ``fname`` is only used for error reporting; it falls back to "stream"
    when empty, and is joined with ``vfs`` for display when one is given.

    Raises util.Abort when the data is not a Mercurial bundle or the
    bundle version is unknown.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # Headerless data piped in (starts with NUL instead of 'HG'):
        # re-inject the consumed bytes and treat it as a bare HG10
        # stream with 'UN' compression.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # the compression marker follows the magic unless already known
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
def buildobsmarkerspart(bundler, markers):
    """Add an 'obsmarkers' part carrying ``markers`` to ``bundler``.

    Returns the new part, or None when ``markers`` is empty (no part is
    created in that case). Raises ValueError when the bundler supports no
    obsmarker format we know how to encode.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    common = obsolete.commonversion(remoteversions)
    if common is None:
        raise ValueError('bundler do not support common obsmarker format')
    data = obsolete.encodemarkers(markers, True, version=common)
    return bundler.newpart('obsmarkers', data=data)
54
54
55 def _canusebundle2(op):
55 def _canusebundle2(op):
56 """return true if a pull/push can use bundle2
56 """return true if a pull/push can use bundle2
57
57
58 Feel free to nuke this function when we drop the experimental option"""
58 Feel free to nuke this function when we drop the experimental option"""
59 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
59 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
60 and op.remote.capable('bundle2'))
60 and op.remote.capable('bundle2'))
61
61
62
62
class pushoperation(object):
    """An object representing a single push operation.

    It carries push-related state so the various push helpers can share it,
    and provides a few very common derived values.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        """Record the push parameters and initialize the mutable state."""
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # when the changegroup push worked, futureheads became common;
        # otherwise fall back to the pessimistic computation above
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of (success, failure) messages used when pushing a bookmark,
    # keyed by the kind of bookmark change
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
171
171
172
172
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return the pushoperation object; its
    ``cgresult`` attribute is an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    # refuse early when the destination lacks requirements we rely on
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction is only opened when we could lock locally;
            # it collects the writes triggered by the server's response
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            # try bundle2 first; the legacy steps below consult
            # pushop.stepsdone and skip work already performed
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()

    return pushop
246
246
247 # list of steps to perform discovery before push
247 # list of steps to perform discovery before push
248 pushdiscoveryorder = []
248 pushdiscoveryorder = []
249
249
250 # Mapping between step name and function
250 # Mapping between step name and function
251 #
251 #
252 # This exists to help extensions wrap steps if necessary
252 # This exists to help extensions wrap steps if necessary
253 pushdiscoverymapping = {}
253 pushdiscoverymapping = {}
254
254
255 def pushdiscovery(stepname):
255 def pushdiscovery(stepname):
256 """decorator for function performing discovery before push
256 """decorator for function performing discovery before push
257
257
258 The function is added to the step -> function mapping and appended to the
258 The function is added to the step -> function mapping and appended to the
259 list of steps. Beware that decorated function will be added in order (this
259 list of steps. Beware that decorated function will be added in order (this
260 may matter).
260 may matter).
261
261
262 You can only use this decorator for a new step, if you want to wrap a step
262 You can only use this decorator for a new step, if you want to wrap a step
263 from an extension, change the pushdiscovery dictionary directly."""
263 from an extension, change the pushdiscovery dictionary directly."""
264 def dec(func):
264 def dec(func):
265 assert stepname not in pushdiscoverymapping
265 assert stepname not in pushdiscoverymapping
266 pushdiscoverymapping[stepname] = func
266 pushdiscoverymapping[stepname] = func
267 pushdiscoveryorder.append(stepname)
267 pushdiscoveryorder.append(stepname)
268 return func
268 return func
269 return dec
269 return dec
270
270
271 def _pushdiscovery(pushop):
271 def _pushdiscovery(pushop):
272 """Run all discovery steps"""
272 """Run all discovery steps"""
273 for stepname in pushdiscoveryorder:
273 for stepname in pushdiscoveryorder:
274 step = pushdiscoverymapping[stepname]
274 step = pushdiscoverymapping[stepname]
275 step(pushop)
275 step(pushop)
276
276
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Compute which changesets must travel to the remote.

    Stores the discovery outgoing object, the remote heads, and whether
    the remote holds changesets we are missing on the pushoperation.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
289
289
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    Computed for both the success and the failure case of the changeset
    push: ``pushop.outdatedphases`` assumes the changesets make it to the
    remote while ``pushop.fallbackoutdatedphases`` assumes they do not."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # pheads: remote public heads; droots: remote draft roots
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # only changesets public locally need a phase push to a
        # non-publishing server; skip the filter otherwise
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        # nothing is pushed, so success and failure cases coincide
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
324
324
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Select the obsolescence markers relevant to the pushed changesets.

    Does nothing unless marker exchange is enabled, the local obsstore is
    non-empty and the remote exposes the 'obsolete' pushkey namespace.
    """
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
335
335
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and record what must be pushed.

    Fills ``pushop.outbookmarks`` with (name, remote-id, local-id) triples
    and sets ``pushop.bkresult`` to 2 when an explicitly requested bookmark
    exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to the ancestry of the pushed revs
        revnums = [repo.changelog.rev(node) for node in pushop.revs]
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user explicitly asked to push
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    out = pushop.outbookmarks
    # bookmarks that advanced locally
    for b, scid, dcid in advsrc:
        explicit.discard(b)
        if not ancestors or repo[scid].rev() in ancestors:
            out.append((b, dcid, scid))
    # bookmarks created locally
    for b, scid, dcid in addsrc:
        explicit.discard(b)
        out.append((b, '', scid))
    # bookmarks that would overwrite the remote one
    for b, scid, dcid in advdst + diverge + differ:
        explicit.discard(b)
        out.append((b, dcid, scid))
    # bookmarks that only exist remotely: delete when explicitly pushed
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            out.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        explicit.discard(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
386
386
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing them.

    Returns False when there is nothing to push, True otherwise. Unless
    --force was given, aborts when an outgoing head is obsolete or
    troubled, and delegates head checking to discovery.checkheads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if pushop.force:
        return True
    # an empty obsstore is falsy: skip the whole scan in that case
    if unfi.obsstore:
        # these messages are assembled here for 80 char limit reasons
        mso = _("push includes obsolete changeset: %s!")
        mst = {"unstable": _("push includes unstable changeset: %s!"),
               "bumped": _("push includes bumped changeset: %s!"),
               "divergent": _("push includes divergent changeset: %s!")}
        # If there is at least one obsolete or unstable changeset in
        # missing, at least one of the missing heads will be obsolete or
        # unstable. So checking heads only is ok.
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise util.Abort(mso % ctx)
            if ctx.troubled():
                raise util.Abort(mst[ctx.troubles()[0]] % ctx)
    newbm = pushop.ui.configlist('bookmarks', 'pushing')
    discovery.checkheads(unfi, pushop.remote, outgoing,
                         pushop.remoteheads,
                         pushop.newbranch,
                         bool(pushop.incoming),
                         newbm)
    return True
421
421
# Names of the steps generating parts for an outgoing bundle2, in order.
b2partsgenorder = []

# step name -> part generating function
#
# This exists so extensions can wrap individual steps if necessary.
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """Decorator registering a function that generates a bundle2 part.

    The decorated function is recorded under ``stepname`` and the name is
    appended to the ordered step list, so registration order is generation
    order (this may matter).

    Only use this decorator for brand new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly.
    """
    def register(fn):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = fn
        b2partsgenorder.append(stepname)
        return fn
    return register
445
445
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        # changesets were already handled by another mechanism
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        # nothing to push, or the outgoing check vetoed the push
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # ask the remote to abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # pick the highest changegroup version supported by both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
486
486
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one ``pushkey`` part per head that must be turned public on the
    remote. The returned reply handler inspects the outcome of each part
    and warns about updates the server ignored or rejected.
    """
    if 'phases' in pushop.stepsdone:
        # phases were already handled by another mechanism
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use the "x not in y" form for consistency with _pushb2bookmarks
    # (was "not x in y")
    if 'pushkey' not in b2caps:
        # remote cannot process pushkey parts inside a bundle2; the
        # standalone pushkey protocol will be used as a fallback
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        # check the server's answer for every part we added
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
518
518
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle when both ends agree
    on a markers format"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    versions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(versions) is None:
        # no markers version understood by both sides
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
529
529
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one ``pushkey`` part per outgoing bookmark change. The returned
    reply handler reports the outcome of each update to the user.
    """
    if 'bookmarks' in pushop.stepsdone:
        # bookmarks were already handled by another mechanism
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        # remote cannot process pushkey parts inside a bundle2; the
        # standalone pushkey protocol will be used instead (_pushbookmark)
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty "old" means the bookmark is created, empty "new" means
        # it is deleted; anything else is a plain position update
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            # discovery pre-set bkresult; flag that the push ran
            pushop.bkresult = 1
    return handlereply
572
572
573
573
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback is only allowed when we hold a transaction manager and the
    # experimental knob is set
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let every registered part generator add its parts; a generator may
    # return a callable that will later process the server's reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        # only the mandatory 'replycaps' part was added
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    # run the reply handlers collected from the part generators
    for rephand in replyhandlers:
        rephand(op)
610
610
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        # changesets were already pushed (e.g. through bundle2)
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
659
659
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        # pheads: remote public heads, droots: remote draft roots
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
715
715
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        tr = pushop.trmanager.transaction()
        phases.advanceboundary(pushop.repo, tr, phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n')
                         % phases.phasenames[phase])
732
732
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
751
751
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # abort if the changeset push failed, or skip if another
        # mechanism (bundle2) already handled bookmarks
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # empty "old" means creation, empty "new" means deletion
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
    # discovery can have set the value form invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
773
773
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state so it can be handed to the
    individual pull steps as a single argument.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager (a `transactionmanager`, set by `pull`)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
830
830
class transactionmanager(object):
    """Manage the life cycle of a pull/unbundle transaction.

    The transaction is created lazily, on first request, and the hook
    arguments describing the operation are attached to it at that point."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
869
860
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and related data) from ``remote`` into ``repo``

    ``heads`` restricts the pull to those remote heads (None means
    everything); ``bookmarks`` lists bookmarks to pull explicitly.
    Returns the ``pulloperation`` object carrying the operation state.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # local-to-local pull: make sure we support all the destination's
        # repository requirements before touching anything
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        # bundle2 path handles several steps at once; the individual
        # _pull* steps below skip whatever was already done
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release the transaction before the lock
        pullop.trmanager.release()
        lock.release()

    return pullop
897
888
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is recorded in the step -> function mapping and
    its name appended to the list of steps; decorated functions therefore
    run in definition order (this may matter).

    Only use this decorator for a new step; to wrap an existing step from
    an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
921
912
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # execute the registered steps in their registration order
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
927
918
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # remote head is known locally (possibly hidden): treat it
                # as common instead of fetching it again
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # everything "missing" was actually known locally
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
965
956
966 def _pullbundle2(pullop):
957 def _pullbundle2(pullop):
967 """pull data using bundle2
958 """pull data using bundle2
968
959
969 For now, the only supported data are changegroup."""
960 For now, the only supported data are changegroup."""
970 remotecaps = bundle2.bundle2caps(pullop.remote)
961 remotecaps = bundle2.bundle2caps(pullop.remote)
971 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
962 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
972 # pulling changegroup
963 # pulling changegroup
973 pullop.stepsdone.add('changegroup')
964 pullop.stepsdone.add('changegroup')
974
965
975 kwargs['common'] = pullop.common
966 kwargs['common'] = pullop.common
976 kwargs['heads'] = pullop.heads or pullop.rheads
967 kwargs['heads'] = pullop.heads or pullop.rheads
977 kwargs['cg'] = pullop.fetch
968 kwargs['cg'] = pullop.fetch
978 if 'listkeys' in remotecaps:
969 if 'listkeys' in remotecaps:
979 kwargs['listkeys'] = ['phase', 'bookmarks']
970 kwargs['listkeys'] = ['phase', 'bookmarks']
980 if not pullop.fetch:
971 if not pullop.fetch:
981 pullop.repo.ui.status(_("no changes found\n"))
972 pullop.repo.ui.status(_("no changes found\n"))
982 pullop.cgresult = 0
973 pullop.cgresult = 0
983 else:
974 else:
984 if pullop.heads is None and list(pullop.common) == [nullid]:
975 if pullop.heads is None and list(pullop.common) == [nullid]:
985 pullop.repo.ui.status(_("requesting all changes\n"))
976 pullop.repo.ui.status(_("requesting all changes\n"))
986 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
977 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
987 remoteversions = bundle2.obsmarkersversion(remotecaps)
978 remoteversions = bundle2.obsmarkersversion(remotecaps)
988 if obsolete.commonversion(remoteversions) is not None:
979 if obsolete.commonversion(remoteversions) is not None:
989 kwargs['obsmarkers'] = True
980 kwargs['obsmarkers'] = True
990 pullop.stepsdone.add('obsmarkers')
981 pullop.stepsdone.add('obsmarkers')
991 _pullbundle2extraprepare(pullop, kwargs)
982 _pullbundle2extraprepare(pullop, kwargs)
992 bundle = pullop.remote.getbundle('pull', **kwargs)
983 bundle = pullop.remote.getbundle('pull', **kwargs)
993 try:
984 try:
994 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
985 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
995 except error.BundleValueError, exc:
986 except error.BundleValueError, exc:
996 raise util.Abort('missing support for %s' % exc)
987 raise util.Abort('missing support for %s' % exc)
997
988
998 if pullop.fetch:
989 if pullop.fetch:
999 results = [cg['return'] for cg in op.records['changegroup']]
990 results = [cg['return'] for cg in op.records['changegroup']]
1000 pullop.cgresult = changegroup.combineresults(results)
991 pullop.cgresult = changegroup.combineresults(results)
1001
992
1002 # processing phases change
993 # processing phases change
1003 for namespace, value in op.records['listkeys']:
994 for namespace, value in op.records['listkeys']:
1004 if namespace == 'phases':
995 if namespace == 'phases':
1005 _pullapplyphases(pullop, value)
996 _pullapplyphases(pullop, value)
1006
997
1007 # processing bookmark update
998 # processing bookmark update
1008 for namespace, value in op.records['listkeys']:
999 for namespace, value in op.records['listkeys']:
1009 if namespace == 'bookmarks':
1000 if namespace == 'bookmarks':
1010 pullop.remotebookmarks = value
1001 pullop.remotebookmarks = value
1011 _pullbookmarks(pullop)
1002 _pullbookmarks(pullop)
1012
1003
1013 def _pullbundle2extraprepare(pullop, kwargs):
1004 def _pullbundle2extraprepare(pullop, kwargs):
1014 """hook function so that extensions can extend the getbundle call"""
1005 """hook function so that extensions can extend the getbundle call"""
1015 pass
1006 pass
1016
1007
1017 def _pullchangeset(pullop):
1008 def _pullchangeset(pullop):
1018 """pull changeset from unbundle into the local repo"""
1009 """pull changeset from unbundle into the local repo"""
1019 # We delay the open of the transaction as late as possible so we
1010 # We delay the open of the transaction as late as possible so we
1020 # don't open transaction for nothing or you break future useful
1011 # don't open transaction for nothing or you break future useful
1021 # rollback call
1012 # rollback call
1022 if 'changegroup' in pullop.stepsdone:
1013 if 'changegroup' in pullop.stepsdone:
1023 return
1014 return
1024 pullop.stepsdone.add('changegroup')
1015 pullop.stepsdone.add('changegroup')
1025 if not pullop.fetch:
1016 if not pullop.fetch:
1026 pullop.repo.ui.status(_("no changes found\n"))
1017 pullop.repo.ui.status(_("no changes found\n"))
1027 pullop.cgresult = 0
1018 pullop.cgresult = 0
1028 return
1019 return
1029 pullop.gettransaction()
1020 pullop.gettransaction()
1030 if pullop.heads is None and list(pullop.common) == [nullid]:
1021 if pullop.heads is None and list(pullop.common) == [nullid]:
1031 pullop.repo.ui.status(_("requesting all changes\n"))
1022 pullop.repo.ui.status(_("requesting all changes\n"))
1032 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1023 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1033 # issue1320, avoid a race if remote changed after discovery
1024 # issue1320, avoid a race if remote changed after discovery
1034 pullop.heads = pullop.rheads
1025 pullop.heads = pullop.rheads
1035
1026
1036 if pullop.remote.capable('getbundle'):
1027 if pullop.remote.capable('getbundle'):
1037 # TODO: get bundlecaps from remote
1028 # TODO: get bundlecaps from remote
1038 cg = pullop.remote.getbundle('pull', common=pullop.common,
1029 cg = pullop.remote.getbundle('pull', common=pullop.common,
1039 heads=pullop.heads or pullop.rheads)
1030 heads=pullop.heads or pullop.rheads)
1040 elif pullop.heads is None:
1031 elif pullop.heads is None:
1041 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1032 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1042 elif not pullop.remote.capable('changegroupsubset'):
1033 elif not pullop.remote.capable('changegroupsubset'):
1043 raise util.Abort(_("partial pull cannot be done because "
1034 raise util.Abort(_("partial pull cannot be done because "
1044 "other repository doesn't support "
1035 "other repository doesn't support "
1045 "changegroupsubset."))
1036 "changegroupsubset."))
1046 else:
1037 else:
1047 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1038 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1048 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1039 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1049 pullop.remote.url())
1040 pullop.remote.url())
1050
1041
1051 def _pullphase(pullop):
1042 def _pullphase(pullop):
1052 # Get remote phases data from remote
1043 # Get remote phases data from remote
1053 if 'phases' in pullop.stepsdone:
1044 if 'phases' in pullop.stepsdone:
1054 return
1045 return
1055 remotephases = pullop.remote.listkeys('phases')
1046 remotephases = pullop.remote.listkeys('phases')
1056 _pullapplyphases(pullop, remotephases)
1047 _pullapplyphases(pullop, remotephases)
1057
1048
1058 def _pullapplyphases(pullop, remotephases):
1049 def _pullapplyphases(pullop, remotephases):
1059 """apply phase movement from observed remote state"""
1050 """apply phase movement from observed remote state"""
1060 if 'phases' in pullop.stepsdone:
1051 if 'phases' in pullop.stepsdone:
1061 return
1052 return
1062 pullop.stepsdone.add('phases')
1053 pullop.stepsdone.add('phases')
1063 publishing = bool(remotephases.get('publishing', False))
1054 publishing = bool(remotephases.get('publishing', False))
1064 if remotephases and not publishing:
1055 if remotephases and not publishing:
1065 # remote is new and unpublishing
1056 # remote is new and unpublishing
1066 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1057 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1067 pullop.pulledsubset,
1058 pullop.pulledsubset,
1068 remotephases)
1059 remotephases)
1069 dheads = pullop.pulledsubset
1060 dheads = pullop.pulledsubset
1070 else:
1061 else:
1071 # Remote is old or publishing all common changesets
1062 # Remote is old or publishing all common changesets
1072 # should be seen as public
1063 # should be seen as public
1073 pheads = pullop.pulledsubset
1064 pheads = pullop.pulledsubset
1074 dheads = []
1065 dheads = []
1075 unfi = pullop.repo.unfiltered()
1066 unfi = pullop.repo.unfiltered()
1076 phase = unfi._phasecache.phase
1067 phase = unfi._phasecache.phase
1077 rev = unfi.changelog.nodemap.get
1068 rev = unfi.changelog.nodemap.get
1078 public = phases.public
1069 public = phases.public
1079 draft = phases.draft
1070 draft = phases.draft
1080
1071
1081 # exclude changesets already public locally and update the others
1072 # exclude changesets already public locally and update the others
1082 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1073 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1083 if pheads:
1074 if pheads:
1084 tr = pullop.gettransaction()
1075 tr = pullop.gettransaction()
1085 phases.advanceboundary(pullop.repo, tr, public, pheads)
1076 phases.advanceboundary(pullop.repo, tr, public, pheads)
1086
1077
1087 # exclude changesets already draft locally and update the others
1078 # exclude changesets already draft locally and update the others
1088 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1079 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1089 if dheads:
1080 if dheads:
1090 tr = pullop.gettransaction()
1081 tr = pullop.gettransaction()
1091 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1082 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1092
1083
1093 def _pullbookmarks(pullop):
1084 def _pullbookmarks(pullop):
1094 """process the remote bookmark information to update the local one"""
1085 """process the remote bookmark information to update the local one"""
1095 if 'bookmarks' in pullop.stepsdone:
1086 if 'bookmarks' in pullop.stepsdone:
1096 return
1087 return
1097 pullop.stepsdone.add('bookmarks')
1088 pullop.stepsdone.add('bookmarks')
1098 repo = pullop.repo
1089 repo = pullop.repo
1099 remotebookmarks = pullop.remotebookmarks
1090 remotebookmarks = pullop.remotebookmarks
1100 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1091 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1101 pullop.remote.url(),
1092 pullop.remote.url(),
1102 pullop.gettransaction,
1093 pullop.gettransaction,
1103 explicit=pullop.explicitbookmarks)
1094 explicit=pullop.explicitbookmarks)
1104
1095
1105 def _pullobsolete(pullop):
1096 def _pullobsolete(pullop):
1106 """utility function to pull obsolete markers from a remote
1097 """utility function to pull obsolete markers from a remote
1107
1098
1108 The `gettransaction` is function that return the pull transaction, creating
1099 The `gettransaction` is function that return the pull transaction, creating
1109 one if necessary. We return the transaction to inform the calling code that
1100 one if necessary. We return the transaction to inform the calling code that
1110 a new transaction have been created (when applicable).
1101 a new transaction have been created (when applicable).
1111
1102
1112 Exists mostly to allow overriding for experimentation purpose"""
1103 Exists mostly to allow overriding for experimentation purpose"""
1113 if 'obsmarkers' in pullop.stepsdone:
1104 if 'obsmarkers' in pullop.stepsdone:
1114 return
1105 return
1115 pullop.stepsdone.add('obsmarkers')
1106 pullop.stepsdone.add('obsmarkers')
1116 tr = None
1107 tr = None
1117 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1108 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1118 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1109 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1119 remoteobs = pullop.remote.listkeys('obsolete')
1110 remoteobs = pullop.remote.listkeys('obsolete')
1120 if 'dump0' in remoteobs:
1111 if 'dump0' in remoteobs:
1121 tr = pullop.gettransaction()
1112 tr = pullop.gettransaction()
1122 for key in sorted(remoteobs, reverse=True):
1113 for key in sorted(remoteobs, reverse=True):
1123 if key.startswith('dump'):
1114 if key.startswith('dump'):
1124 data = base85.b85decode(remoteobs[key])
1115 data = base85.b85decode(remoteobs[key])
1125 pullop.repo.obsstore.mergemarkers(tr, data)
1116 pullop.repo.obsstore.mergemarkers(tr, data)
1126 pullop.repo.invalidatevolatilesets()
1117 pullop.repo.invalidatevolatilesets()
1127 return tr
1118 return tr
1128
1119
1129 def caps20to10(repo):
1120 def caps20to10(repo):
1130 """return a set with appropriate options to use bundle20 during getbundle"""
1121 """return a set with appropriate options to use bundle20 during getbundle"""
1131 caps = set(['HG20'])
1122 caps = set(['HG20'])
1132 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1123 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1133 caps.add('bundle2=' + urllib.quote(capsblob))
1124 caps.add('bundle2=' + urllib.quote(capsblob))
1134 return caps
1125 return caps
1135
1126
1136 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1127 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1137 getbundle2partsorder = []
1128 getbundle2partsorder = []
1138
1129
1139 # Mapping between step name and function
1130 # Mapping between step name and function
1140 #
1131 #
1141 # This exists to help extensions wrap steps if necessary
1132 # This exists to help extensions wrap steps if necessary
1142 getbundle2partsmapping = {}
1133 getbundle2partsmapping = {}
1143
1134
1144 def getbundle2partsgenerator(stepname):
1135 def getbundle2partsgenerator(stepname):
1145 """decorator for function generating bundle2 part for getbundle
1136 """decorator for function generating bundle2 part for getbundle
1146
1137
1147 The function is added to the step -> function mapping and appended to the
1138 The function is added to the step -> function mapping and appended to the
1148 list of steps. Beware that decorated functions will be added in order
1139 list of steps. Beware that decorated functions will be added in order
1149 (this may matter).
1140 (this may matter).
1150
1141
1151 You can only use this decorator for new steps, if you want to wrap a step
1142 You can only use this decorator for new steps, if you want to wrap a step
1152 from an extension, attack the getbundle2partsmapping dictionary directly."""
1143 from an extension, attack the getbundle2partsmapping dictionary directly."""
1153 def dec(func):
1144 def dec(func):
1154 assert stepname not in getbundle2partsmapping
1145 assert stepname not in getbundle2partsmapping
1155 getbundle2partsmapping[stepname] = func
1146 getbundle2partsmapping[stepname] = func
1156 getbundle2partsorder.append(stepname)
1147 getbundle2partsorder.append(stepname)
1157 return func
1148 return func
1158 return dec
1149 return dec
1159
1150
1160 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1151 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1161 **kwargs):
1152 **kwargs):
1162 """return a full bundle (with potentially multiple kind of parts)
1153 """return a full bundle (with potentially multiple kind of parts)
1163
1154
1164 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1155 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1165 passed. For now, the bundle can contain only changegroup, but this will
1156 passed. For now, the bundle can contain only changegroup, but this will
1166 changes when more part type will be available for bundle2.
1157 changes when more part type will be available for bundle2.
1167
1158
1168 This is different from changegroup.getchangegroup that only returns an HG10
1159 This is different from changegroup.getchangegroup that only returns an HG10
1169 changegroup bundle. They may eventually get reunited in the future when we
1160 changegroup bundle. They may eventually get reunited in the future when we
1170 have a clearer idea of the API we what to query different data.
1161 have a clearer idea of the API we what to query different data.
1171
1162
1172 The implementation is at a very early stage and will get massive rework
1163 The implementation is at a very early stage and will get massive rework
1173 when the API of bundle is refined.
1164 when the API of bundle is refined.
1174 """
1165 """
1175 # bundle10 case
1166 # bundle10 case
1176 usebundle2 = False
1167 usebundle2 = False
1177 if bundlecaps is not None:
1168 if bundlecaps is not None:
1178 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1169 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1179 if not usebundle2:
1170 if not usebundle2:
1180 if bundlecaps and not kwargs.get('cg', True):
1171 if bundlecaps and not kwargs.get('cg', True):
1181 raise ValueError(_('request for bundle10 must include changegroup'))
1172 raise ValueError(_('request for bundle10 must include changegroup'))
1182
1173
1183 if kwargs:
1174 if kwargs:
1184 raise ValueError(_('unsupported getbundle arguments: %s')
1175 raise ValueError(_('unsupported getbundle arguments: %s')
1185 % ', '.join(sorted(kwargs.keys())))
1176 % ', '.join(sorted(kwargs.keys())))
1186 return changegroup.getchangegroup(repo, source, heads=heads,
1177 return changegroup.getchangegroup(repo, source, heads=heads,
1187 common=common, bundlecaps=bundlecaps)
1178 common=common, bundlecaps=bundlecaps)
1188
1179
1189 # bundle20 case
1180 # bundle20 case
1190 b2caps = {}
1181 b2caps = {}
1191 for bcaps in bundlecaps:
1182 for bcaps in bundlecaps:
1192 if bcaps.startswith('bundle2='):
1183 if bcaps.startswith('bundle2='):
1193 blob = urllib.unquote(bcaps[len('bundle2='):])
1184 blob = urllib.unquote(bcaps[len('bundle2='):])
1194 b2caps.update(bundle2.decodecaps(blob))
1185 b2caps.update(bundle2.decodecaps(blob))
1195 bundler = bundle2.bundle20(repo.ui, b2caps)
1186 bundler = bundle2.bundle20(repo.ui, b2caps)
1196
1187
1197 kwargs['heads'] = heads
1188 kwargs['heads'] = heads
1198 kwargs['common'] = common
1189 kwargs['common'] = common
1199
1190
1200 for name in getbundle2partsorder:
1191 for name in getbundle2partsorder:
1201 func = getbundle2partsmapping[name]
1192 func = getbundle2partsmapping[name]
1202 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1193 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1203 **kwargs)
1194 **kwargs)
1204
1195
1205 return util.chunkbuffer(bundler.getchunks())
1196 return util.chunkbuffer(bundler.getchunks())
1206
1197
1207 @getbundle2partsgenerator('changegroup')
1198 @getbundle2partsgenerator('changegroup')
1208 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1199 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1209 b2caps=None, heads=None, common=None, **kwargs):
1200 b2caps=None, heads=None, common=None, **kwargs):
1210 """add a changegroup part to the requested bundle"""
1201 """add a changegroup part to the requested bundle"""
1211 cg = None
1202 cg = None
1212 if kwargs.get('cg', True):
1203 if kwargs.get('cg', True):
1213 # build changegroup bundle here.
1204 # build changegroup bundle here.
1214 version = None
1205 version = None
1215 cgversions = b2caps.get('changegroup')
1206 cgversions = b2caps.get('changegroup')
1216 if not cgversions: # 3.1 and 3.2 ship with an empty value
1207 if not cgversions: # 3.1 and 3.2 ship with an empty value
1217 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1208 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1218 common=common,
1209 common=common,
1219 bundlecaps=bundlecaps)
1210 bundlecaps=bundlecaps)
1220 else:
1211 else:
1221 cgversions = [v for v in cgversions if v in changegroup.packermap]
1212 cgversions = [v for v in cgversions if v in changegroup.packermap]
1222 if not cgversions:
1213 if not cgversions:
1223 raise ValueError(_('no common changegroup version'))
1214 raise ValueError(_('no common changegroup version'))
1224 version = max(cgversions)
1215 version = max(cgversions)
1225 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1216 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1226 common=common,
1217 common=common,
1227 bundlecaps=bundlecaps,
1218 bundlecaps=bundlecaps,
1228 version=version)
1219 version=version)
1229
1220
1230 if cg:
1221 if cg:
1231 part = bundler.newpart('changegroup', data=cg)
1222 part = bundler.newpart('changegroup', data=cg)
1232 if version is not None:
1223 if version is not None:
1233 part.addparam('version', version)
1224 part.addparam('version', version)
1234
1225
1235 @getbundle2partsgenerator('listkeys')
1226 @getbundle2partsgenerator('listkeys')
1236 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1227 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1237 b2caps=None, **kwargs):
1228 b2caps=None, **kwargs):
1238 """add parts containing listkeys namespaces to the requested bundle"""
1229 """add parts containing listkeys namespaces to the requested bundle"""
1239 listkeys = kwargs.get('listkeys', ())
1230 listkeys = kwargs.get('listkeys', ())
1240 for namespace in listkeys:
1231 for namespace in listkeys:
1241 part = bundler.newpart('listkeys')
1232 part = bundler.newpart('listkeys')
1242 part.addparam('namespace', namespace)
1233 part.addparam('namespace', namespace)
1243 keys = repo.listkeys(namespace).items()
1234 keys = repo.listkeys(namespace).items()
1244 part.data = pushkey.encodekeys(keys)
1235 part.data = pushkey.encodekeys(keys)
1245
1236
1246 @getbundle2partsgenerator('obsmarkers')
1237 @getbundle2partsgenerator('obsmarkers')
1247 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1238 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1248 b2caps=None, heads=None, **kwargs):
1239 b2caps=None, heads=None, **kwargs):
1249 """add an obsolescence markers part to the requested bundle"""
1240 """add an obsolescence markers part to the requested bundle"""
1250 if kwargs.get('obsmarkers', False):
1241 if kwargs.get('obsmarkers', False):
1251 if heads is None:
1242 if heads is None:
1252 heads = repo.heads()
1243 heads = repo.heads()
1253 subset = [c.node() for c in repo.set('::%ln', heads)]
1244 subset = [c.node() for c in repo.set('::%ln', heads)]
1254 markers = repo.obsstore.relevantmarkers(subset)
1245 markers = repo.obsstore.relevantmarkers(subset)
1255 buildobsmarkerspart(bundler, markers)
1246 buildobsmarkerspart(bundler, markers)
1256
1247
1257 def check_heads(repo, their_heads, context):
1248 def check_heads(repo, their_heads, context):
1258 """check if the heads of a repo have been modified
1249 """check if the heads of a repo have been modified
1259
1250
1260 Used by peer for unbundling.
1251 Used by peer for unbundling.
1261 """
1252 """
1262 heads = repo.heads()
1253 heads = repo.heads()
1263 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1254 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1264 if not (their_heads == ['force'] or their_heads == heads or
1255 if not (their_heads == ['force'] or their_heads == heads or
1265 their_heads == ['hashed', heads_hash]):
1256 their_heads == ['hashed', heads_hash]):
1266 # someone else committed/pushed/unbundled while we
1257 # someone else committed/pushed/unbundled while we
1267 # were transferring data
1258 # were transferring data
1268 raise error.PushRaced('repository changed while %s - '
1259 raise error.PushRaced('repository changed while %s - '
1269 'please try again' % context)
1260 'please try again' % context)
1270
1261
1271 def unbundle(repo, cg, heads, source, url):
1262 def unbundle(repo, cg, heads, source, url):
1272 """Apply a bundle to a repo.
1263 """Apply a bundle to a repo.
1273
1264
1274 this function makes sure the repo is locked during the application and have
1265 this function makes sure the repo is locked during the application and have
1275 mechanism to check that no push race occurred between the creation of the
1266 mechanism to check that no push race occurred between the creation of the
1276 bundle and its application.
1267 bundle and its application.
1277
1268
1278 If the push was raced as PushRaced exception is raised."""
1269 If the push was raced as PushRaced exception is raised."""
1279 r = 0
1270 r = 0
1280 # need a transaction when processing a bundle2 stream
1271 # need a transaction when processing a bundle2 stream
1281 tr = None
1272 tr = None
1282 lock = repo.lock()
1273 lock = repo.lock()
1283 try:
1274 try:
1284 check_heads(repo, heads, 'uploading changes')
1275 check_heads(repo, heads, 'uploading changes')
1285 # push can proceed
1276 # push can proceed
1286 if util.safehasattr(cg, 'params'):
1277 if util.safehasattr(cg, 'params'):
1287 try:
1278 try:
1288 tr = repo.transaction('unbundle')
1279 tr = repo.transaction('unbundle')
1289 tr.hookargs['source'] = source
1280 tr.hookargs['source'] = source
1290 tr.hookargs['url'] = url
1281 tr.hookargs['url'] = url
1291 tr.hookargs['bundle2'] = '1'
1282 tr.hookargs['bundle2'] = '1'
1292 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1283 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1293 p = lambda: tr.writepending() and repo.root or ""
1294 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
1295 **tr.hookargs)
1296 hookargs = dict(tr.hookargs)
1297 def runhooks():
1298 repo.hook('b2x-transactionclose', **hookargs)
1299 tr.addpostclose('b2x-hook-transactionclose',
1300 lambda tr: repo._afterlock(runhooks))
1301 tr.close()
1284 tr.close()
1302 except Exception, exc:
1285 except Exception, exc:
1303 exc.duringunbundle2 = True
1286 exc.duringunbundle2 = True
1304 raise
1287 raise
1305 else:
1288 else:
1306 r = changegroup.addchangegroup(repo, cg, source, url)
1289 r = changegroup.addchangegroup(repo, cg, source, url)
1307 finally:
1290 finally:
1308 if tr is not None:
1291 if tr is not None:
1309 tr.release()
1292 tr.release()
1310 lock.release()
1293 lock.release()
1311 return r
1294 return r
General Comments 0
You need to be logged in to leave comments. Login now