##// END OF EJS Templates
bundle2: add an 'idx' argument to the 'getbundle2partsgenerator'...
Pierre-Yves David -
r24732:8f70b529 default
parent child Browse files
Show More
@@ -1,1297 +1,1300
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle stream ``fh``.

    ``fname`` is only used for error reporting (falling back to
    "stream"); when ``vfs`` is provided the name is joined to it.
    Dispatches on the four byte magic header: "HG10" streams yield a
    cg1unpacker, "HG2*" streams yield a bundle2 unbundler.
    """
    hdr = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # a stream starting with NUL bytes is a headerless changegroup;
        # push the consumed bytes back and treat it as uncompressed HG10
        if not hdr.startswith('HG') and hdr.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, hdr)
            hdr = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = hdr[0:2], hdr[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression algorithm follows the header for HG10 bundles
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
def buildobsmarkerspart(bundler, markers):
    """Attach an 'obsmarkers' part carrying ``markers`` to ``bundler``.

    Returns the newly created part, or None when ``markers`` is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
54
54
55 def _canusebundle2(op):
55 def _canusebundle2(op):
56 """return true if a pull/push can use bundle2
56 """return true if a pull/push can use bundle2
57
57
58 Feel free to nuke this function when we drop the experimental option"""
58 Feel free to nuke this function when we drop the experimental option"""
59 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
59 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
60 and op.remote.capable('bundle2'))
60 and op.remote.capable('bundle2'))
61
61
62
62
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common
    operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed on the command line
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, as (name, old-remote-id, new-local-id) triples
        self.outbookmarks = []
        # transaction manager (set by push() when the local repo is locked)
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded; otherwise
        # fall back to the heads computed for the failure case
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads


# mapping of messages used when pushing bookmarks
# each value is a (success message, failure message) pair
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
171
171
172
172
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the pushoperation object; its ``cgresult`` attribute holds
    the integer changegroup result:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # local-to-local push: the destination must support every
        # requirement of the source repository
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        # only a permission error is tolerated; anything else is fatal
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # open a transaction so server replies can be applied locally
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # each step below checks pushop.stepsdone and is a no-op if
            # the bundle2 exchange already performed it
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()

    return pushop
246
246
# list of step names, in the order discovery should be performed
# before a push (populated by the pushdiscovery decorator)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
254
254
def pushdiscovery(stepname):
    """Decorator registering a pre-push discovery step.

    The decorated function is recorded in the step -> function mapping
    and appended to the ordered list of steps. Beware that decorated
    functions are added in registration order (this may matter).

    Only use this decorator for new steps; to wrap an existing step from
    an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # each step name may be registered only once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
270
270
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
276
276
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Find the changesets that need to travel to the remote.

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming``.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
289
289
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure case of the changeset
    push; results go to ``pushop.outdatedphases`` and
    ``pushop.fallbackoutdatedphases``)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # pheads is not used below; only the draft roots matter here
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # on a non-publishing server only public changesets can be outdated
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
324
324
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the push.

    Fills ``pushop.outobsmarkers`` when marker exchange is enabled, the
    local obsstore is non-empty and the remote supports the 'obsolete'
    pushkey namespace; otherwise leaves it untouched.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repos.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
335
335
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and fill ``pushop.outbookmarks``.

    Each entry is a (name, remote-id, local-id) triple, with '' standing
    in for a missing side (addition or deletion). Explicitly pushed
    bookmarks that exist on neither side trigger a warning and set
    ``pushop.bkresult`` to 2.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # when specific revs are pushed, only consider bookmarks on their
    # ancestors
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # track explicitly requested bookmarks so leftovers can be reported
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push unless outside pushed revs
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark (only when explicitly requested)
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete (only when explicitly requested)
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
386
386
def _pushcheckoutgoing(pushop):
    """Check that the outgoing changesets are safe to push.

    Returns False when there is nothing to push. Unless the push is
    forced, aborts when the push would propagate obsolete or troubled
    changesets, or create unwanted new remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
421
421
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated by the b2partsgenerator decorator below)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
429
429
def b2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generating step.

    The decorated function is recorded in the step -> function mapping
    and added to the ordered list of steps: appended when ``idx`` is
    None, otherwise inserted at position ``idx``. Beware that decorated
    functions are added in registration order (this may matter).

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may be registered only once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        # inserting at len() is equivalent to appending
        position = len(b2partsgenorder) if idx is None else idx
        b2partsgenorder.insert(position, stepname)
        return func
    return register
448
448
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # ask the server to abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    # negotiate the changegroup version to use with the remote
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions: # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # keep only the versions we can actually produce locally
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        # the server records one reply per part; match it by part id
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
489
489
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One 'pushkey' part is added per remote head that must be turned public.
    Returns a reply handler that inspects the server's per-part replies and
    warns when a phase update was ignored or failed.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # phases travel as pushkey parts; without remote support we leave the
    # step to the legacy code path (consistent check with _pushb2bookmarks)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        # remember which node each part acts on so the reply can be matched
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
521
521
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle when supported"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # only push markers when a common obsmarkers format exists
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is not None:
        pushop.stepsdone.add('obsmarkers')
        if pushop.outobsmarkers:
            buildobsmarkerspart(bundler, pushop.outobsmarkers)
532
532
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One 'pushkey' part is added per outgoing bookmark change. Returns a
    reply handler that reports the outcome of each change and flips
    ``pushop.bkresult`` to 1 on completion.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify the change so the reply handler can pick the right
        # message pair out of bookmsgmap
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
575
575
576
576
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback: allow the server to send parts back to us; only possible
    # with a transaction manager to apply them and the experimental knob set
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator in order; a generator may return a
    # callable to be invoked later on the server reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (the replycaps part is always there)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    # let each step interpret its portion of the reply
    for rephand in replyhandlers:
        rephand(op)
613
613
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
662
662
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
718
718
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    trmanager = pushop.trmanager
    if not trmanager:
        # Without a transaction the repo is not locked, so no phase may be
        # changed safely. Only inform the user when a move would have
        # actually happened.
        repo = pushop.repo
        if any(phase < repo[n].phase() for n in nodes):
            phasestr = phases.phasenames[phase]
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
        return
    phases.advanceboundary(pushop.repo, trmanager.transaction(), phase, nodes)
735
735
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remote = pushop.remote
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        pushop.repo.ui.warn(_('failed to push some obsolete markers!\n'))
754
754
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote
    for book, old, new in pushop.outbookmarks:
        # classify the change to pick the right message pair from bookmsgmap
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        ok = remote.pushkey('bookmarks', book, old, new)
        if ok:
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
776
776
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager (set by pull(); None until then)
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # set of steps already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible;
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset;
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
833
833
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    The transaction is created lazily, on first demand, and the appropriate
    hooks are called when it is closed."""
    def __init__(self, repo, source, url):
        # repository the transaction is opened on
        self.repo = repo
        # operation name ('pull', 'push', ...), used in the transaction name
        self.source = source
        # remote url, recorded in the hook arguments
        self.url = url
        # lazily-created transaction object
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
863
863
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets and related data from a remote into repo

    Runs discovery then the individual pull steps (bundle2 when available,
    then changegroup/phase/bookmark/obsmarker fallbacks) inside a single
    lock + transaction. Returns the pulloperation object carrying results.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # local-to-local pull: refuse early if the source needs features
        # this repo does not support
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # each step is a no-op when already handled by the bundle2 pull
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release before unlocking so the transaction never outlives the lock
        pullop.trmanager.release()
        lock.release()

    return pullop
891
891
# ordered list of discovery step names to perform before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is registered in the step -> function mapping and
    its step name appended to the ordered step list; registration order is
    execution order, so it may matter.

    This decorator is only for brand new steps. To wrap an existing step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
915
915
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
921
921
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because that ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally: common even though discovery missed it
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
959
959
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    # 'cg' tells the server whether the bundle should carry changegroup data
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        # piggyback phase and bookmark data on the same round trip
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            # both sides share an obsmarker format: fetch markers inside
            # this same bundle2 exchange
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions tweak the request before it is sent
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # aggregate the per-part changegroup return codes into one result
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
1006
1006
1007 def _pullbundle2extraprepare(pullop, kwargs):
1007 def _pullbundle2extraprepare(pullop, kwargs):
1008 """hook function so that extensions can extend the getbundle call"""
1008 """hook function so that extensions can extend the getbundle call"""
1009 pass
1009 pass
1010
1010
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the opening of the transaction as late as possible so we
    # don't open a transaction for nothing, and so we don't break future
    # useful rollback calls.
    if 'changegroup' in pullop.stepsdone:
        # changesets were already transferred (e.g. by the bundle2 path)
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the most capable wire-protocol command the remote offers
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # old remote, full pull: plain changegroup is enough
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1044
1044
1045 def _pullphase(pullop):
1045 def _pullphase(pullop):
1046 # Get remote phases data from remote
1046 # Get remote phases data from remote
1047 if 'phases' in pullop.stepsdone:
1047 if 'phases' in pullop.stepsdone:
1048 return
1048 return
1049 remotephases = pullop.remote.listkeys('phases')
1049 remotephases = pullop.remote.listkeys('phases')
1050 _pullapplyphases(pullop, remotephases)
1050 _pullapplyphases(pullop, remotephases)
1051
1051
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the mapping returned by the remote's 'phases'
    pushkey namespace.  Phases only move forward (toward public)."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing: compute which pulled heads the
        # remote considers public, the rest stay draft
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing: all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    # use the unfiltered repo so hidden changesets get their phase updated too
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1086
1086
1087 def _pullbookmarks(pullop):
1087 def _pullbookmarks(pullop):
1088 """process the remote bookmark information to update the local one"""
1088 """process the remote bookmark information to update the local one"""
1089 if 'bookmarks' in pullop.stepsdone:
1089 if 'bookmarks' in pullop.stepsdone:
1090 return
1090 return
1091 pullop.stepsdone.add('bookmarks')
1091 pullop.stepsdone.add('bookmarks')
1092 repo = pullop.repo
1092 repo = pullop.repo
1093 remotebookmarks = pullop.remotebookmarks
1093 remotebookmarks = pullop.remotebookmarks
1094 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1094 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1095 pullop.remote.url(),
1095 pullop.remote.url(),
1096 pullop.gettransaction,
1096 pullop.gettransaction,
1097 explicit=pullop.explicitbookmarks)
1097 explicit=pullop.explicitbookmarks)
1098
1098
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    ``pullop.gettransaction`` is a function that returns the pull
    transaction, creating one if necessary.  We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is the first chunk key; its absence means no markers at all
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    # marker chunks are base85-encoded in the pushkey values
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1122
1122
def caps20to10(repo):
    """Build the bundlecaps set advertising bundle2 support for getbundle."""
    # encode this repo's bundle2 capabilities into a URL-quoted blob
    blob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(blob)])
1129
1129
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to
    the list of steps.  Beware that decorated functions will be added in
    order (this may matter).

    When ``idx`` is not None, the step is inserted at position ``idx`` in
    ``getbundle2partsorder`` instead of being appended, so a caller can
    control where its part is generated relative to the existing steps.

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        # each step name may be registered only once; wrapping an existing
        # step goes through the mapping instead
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
1153
1156
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.  For now, the bundle can contain only changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup, which only returns
    an HG10 changegroup bundle.  They may eventually get reunited in the
    future when we have a clearer idea of the API we want for querying
    different data.

    The implementation is at a very early stage and will get massive
    rework when the bundle API is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        # any 'HG2*' capability advertised by the client selects bundle2
        usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            # bundle10 understands no extra arguments
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            # decode the client's bundle2 capabilities blob
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in order (order matters)
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1200
1203
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        if not cgversions: # 3.1 and 3.2 ship with an empty value
            # no version negotiation possible: use the default raw format
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # keep only versions both sides understand, pick the highest
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            # advertise the negotiated version on the part
            part.addparam('version', version)
1228
1231
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """Emit one 'listkeys' part per requested pushkey namespace."""
    for namespace in kwargs.get('listkeys', ()):
        newpart = bundler.newpart('listkeys')
        newpart.addparam('namespace', namespace)
        newpart.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1239
1242
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """Attach an obsolescence-markers part when the request asks for one."""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to the ancestors of the requested heads
    ancestors = [ctx.node() for ctx in repo.set('::%ln', heads)]
    buildobsmarkerspart(bundler, repo.obsstore.relevantmarkers(ancestors))
1250
1253
def check_heads(repo, their_heads, context):
    """Raise PushRaced if the repo's heads changed since *their_heads*.

    Used by peer for unbundling.  ``their_heads`` is either ['force'], the
    expected head list, or ['hashed', <digest of sorted heads>].
    """
    currentheads = repo.heads()
    currenthash = util.sha1(''.join(sorted(currentheads))).digest()
    allowed = (their_heads == ['force']
               or their_heads == currentheads
               or their_heads == ['hashed', currenthash])
    if not allowed:
        # another client committed/pushed/unbundled while the data was
        # being transferred
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1264
1267
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the
    creation of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        # abort early if the repo heads changed since the bundle was made
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # having 'params' identifies a bundle2 stream
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                tr.close()
            except Exception, exc:
                # tag the exception so callers know it happened during
                # bundle2 processing
                exc.duringunbundle2 = True
                raise
        else:
            # legacy bundle10 path
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            # a no-op if tr.close() already ran
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now