##// END OF EJS Templates
push: acquire local 'wlock' if "pushback" is expected (BC) (issue4596)...
Pierre-Yves David -
r24754:5dc5cd7a default
parent child Browse files
Show More
@@ -1,1301 +1,1308 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14
14
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header of a bundle stream and return an unpacker.

    ``fh`` is the stream, ``fname`` a display name (``"stream"`` when
    empty), ``vfs`` an optional vfs used to resolve ``fname`` to a full
    path. Raises util.Abort for non-Mercurial or unknown-version bundles.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # Headerless stream starting with NUL: put the consumed bytes
        # back and treat it as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # compression algorithm follows the header unless already known
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
40
40
def buildobsmarkerspart(bundler, markers):
    """Append an 'obsmarkers' part carrying ``markers`` to ``bundler``.

    Returns the new part, or None when ``markers`` is empty (no part is
    created in that case). Raises ValueError if the bundler supports no
    obsmarker format we know how to encode.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
55
55
56 def _canusebundle2(op):
56 def _canusebundle2(op):
57 """return true if a pull/push can use bundle2
57 """return true if a pull/push can use bundle2
58
58
59 Feel free to nuke this function when we drop the experimental option"""
59 Feel free to nuke this function when we drop the experimental option"""
60 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
60 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
61 and op.remote.capable('bundle2'))
61 and op.remote.capable('bundle2'))
62
62
63
63
class pushoperation(object):
    """Carrier for the state of a single push operation.

    One instance should be created at the beginning of each push and
    discarded afterward; it holds everything the various push steps
    share (discovery results, lock state, per-step outcomes).
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repository we push from
        self.repo = repo
        self.ui = repo.ui
        # repository we push to
        self.remote = remote
        # was --force requested?
        self.force = force
        # revisions to be pushed (None means "everything")
        self.revs = revs
        # bookmarks explicitly requested on the command line
        self.bookmarks = bookmarks
        # is creating a new remote branch allowed?
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through
        # bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads stay relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        commonrevs = set(self.outgoing.common)
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in commonrevs]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
172
172
173
173
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing over the local filesystem: refuse early if the
        # destination lacks required repository features
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering
        # (wlock must always be acquired before lock).
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # responses from the remote may need a transaction locally
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must be able to lock the remote
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
247
254
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator registering a pre-push discovery step

    The decorated function is stored in the step -> function mapping and
    the step name appended to the ordered step list; decoration order is
    therefore execution order (this may matter).

    You can only use this decorator for a new step, if you want to wrap a
    step from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        # refuse to silently overwrite an existing step
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
277 return dec
271
278
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
283 step(pushop)
277
284
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    # truthy when some remote nodes are missing locally
    pushop.incoming = inc
296 pushop.incoming = inc
290
297
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    pheads, droots = phases.analyzeremotephases(pushop.repo,
                                                pushop.fallbackheads,
                                                remotephases)
    # on a non-publishing server only public changesets can be outdated
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
325
332
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """select the obsolescence markers relevant to the pushed changesets"""
    repo = pushop.repo
    # all three conditions must hold, checked in the original order so
    # the (possibly remote) listkeys call is only made when needed
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
336
343
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """figure out which bookmarks to add, move or delete on the remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to ancestors of the pushed revs
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks explicitly requested; drained below so leftovers are errors
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that moved forward locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
387
394
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push; raises util.Abort when a
    non-forced push would publish obsolete/troubled changesets or create
    disallowed heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
422
429
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator

    The decorated function is stored in the step -> function mapping and
    the step name placed in the ordered step list: appended when ``idx``
    is None, otherwise inserted at position ``idx``.

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def dec(func):
        # refuse to silently overwrite an existing step
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
449
456
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        # changesets were already handled (e.g. by an extension)
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # without --force, ask the server to abort if its heads changed
        # since discovery (push race detection)
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version known to both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
490
497
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One 'pushkey' part is queued per remote head that must be turned
    public. The returned reply handler inspects the outcome of each part
    and warns the user about ignored or failed phase updates.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use the idiomatic `not in` (consistent with _pushb2bookmarks below)
    if 'pushkey' not in b2caps:
        # remote cannot handle pushkey parts; the fallback pushkey-based
        # exchange in _pushsyncphase will take over
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        # check the server reply for every queued pushkey part
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
522
529
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    # Add an obsolescence-markers part to the bundle when both sides share
    # a common markers format version; otherwise leave the step undone so
    # the legacy pushkey-based exchange (_pushobsolete) can handle it.
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no common markers version: cannot exchange through bundle2
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
533
540
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One 'pushkey' part is queued per outgoing bookmark move. The returned
    reply handler reports every move as an update, an export (creation)
    or a delete, and sets ``pushop.bkresult`` when relevant."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        # remote cannot handle pushkey parts; the fallback pushkey-based
        # exchange in _pushbookmark will take over
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            # no previous value: the bookmark is created on the remote
            action = 'export'
        elif not new:
            # no new value: the bookmark is deleted on the remote
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        # check the server reply for every queued pushkey part
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
576
583
577
584
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback requires an open local transaction so the server reply can
    # be applied locally
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            # a part generator may return a callable that will process
            # the server reply for its part(s)
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        # only the mandatory 'replycaps' part is present
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
614
621
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    The result (remote's addchangegroup return value) is stored in
    ``pushop.cgresult``."""
    if 'changesets' in pushop.stepsdone:
        # changesets were already pushed (e.g. through bundle2)
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
663
670
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies remote phase information to the local repo, then pushes
    any outstanding local phase changes to the remote through the
    standalone pushkey command (unless bundle2 already did)."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common is public locally too
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
719
726
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # we hold a transaction (hence the repo lock): the phase
        # boundary can safely be advanced
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Only inform the user that phases should have been moved when
    # applicable.
    repo = pushop.repo
    phasestr = phases.phasenames[phase]
    wouldmove = [node for node in nodes if phase < repo[node].phase()]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
736
743
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        # markers were already exchanged (e.g. through bundle2)
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        # markers are encoded into one or more pushkey payloads
        # (presumably split for size reasons — see obsolete._pushkeyescape)
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            # at least one pushkey call reported failure
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
755
762
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # the changegroup push failed, or bookmarks were already handled
        # (e.g. through bundle2)
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # classify the move for user feedback
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        ok = remote.pushkey('bookmarks', book, old, new)
        if ok:
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
    # discovery can have set the value form invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
777
784
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager (set by pull(), None until then)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data (set by pull())
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
834
841
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        # repository the transaction applies to
        self.repo = repo
        # operation name (used in hook arguments and transaction name)
        self.source = source
        # remote url (used in hook arguments and transaction name)
        self.url = url
        # lazily-created transaction object
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
864
871
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and related data) from ``remote`` into ``repo``

    ``heads`` restricts the pull to these remote heads (None means pull
    everything); ``bookmarks`` lists bookmarks to pull explicitly.

    Returns the ``pulloperation`` object carrying the pull state and
    results. Raises util.Abort when a local source repository requires
    features the destination does not support.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # pulling from a local repo: check feature support up front,
        # before touching anything
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # the bundle2 path records finished steps in pullop.stepsdone, so
        # the legacy steps below only perform work still pending
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # trmanager is still None if its construction raised; guard the
        # cleanup so the original exception is not masked by an
        # AttributeError on None
        if pullop.trmanager is not None:
            pullop.trmanager.release()
        lock.release()

    return pullop
892
899
# list of steps to perform discovery before pull
# (step names in execution order; filled by the pulldiscovery decorator)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
900
907
def pulldiscovery(stepname):
    """decorator registering a pre-pull discovery step

    Adds the decorated function to the step -> function mapping and
    records the step at the end of the ordered step list; registration
    order matters.

    Only use this decorator for a new step; to wrap a step from an
    extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # a step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
916
923
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # execute every registered discovery step, in registration order
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
922
929
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point.

    Stores its findings in ``pullop.common``, ``pullop.fetch`` and
    ``pullop.rheads``."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situation. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # node known locally: it is common even though hidden
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every "missing" head was actually known: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
960
967
961 def _pullbundle2(pullop):
968 def _pullbundle2(pullop):
962 """pull data using bundle2
969 """pull data using bundle2
963
970
964 For now, the only supported data are changegroup."""
971 For now, the only supported data are changegroup."""
965 remotecaps = bundle2.bundle2caps(pullop.remote)
972 remotecaps = bundle2.bundle2caps(pullop.remote)
966 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
973 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
967 # pulling changegroup
974 # pulling changegroup
968 pullop.stepsdone.add('changegroup')
975 pullop.stepsdone.add('changegroup')
969
976
970 kwargs['common'] = pullop.common
977 kwargs['common'] = pullop.common
971 kwargs['heads'] = pullop.heads or pullop.rheads
978 kwargs['heads'] = pullop.heads or pullop.rheads
972 kwargs['cg'] = pullop.fetch
979 kwargs['cg'] = pullop.fetch
973 if 'listkeys' in remotecaps:
980 if 'listkeys' in remotecaps:
974 kwargs['listkeys'] = ['phase', 'bookmarks']
981 kwargs['listkeys'] = ['phase', 'bookmarks']
975 if not pullop.fetch:
982 if not pullop.fetch:
976 pullop.repo.ui.status(_("no changes found\n"))
983 pullop.repo.ui.status(_("no changes found\n"))
977 pullop.cgresult = 0
984 pullop.cgresult = 0
978 else:
985 else:
979 if pullop.heads is None and list(pullop.common) == [nullid]:
986 if pullop.heads is None and list(pullop.common) == [nullid]:
980 pullop.repo.ui.status(_("requesting all changes\n"))
987 pullop.repo.ui.status(_("requesting all changes\n"))
981 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
988 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
982 remoteversions = bundle2.obsmarkersversion(remotecaps)
989 remoteversions = bundle2.obsmarkersversion(remotecaps)
983 if obsolete.commonversion(remoteversions) is not None:
990 if obsolete.commonversion(remoteversions) is not None:
984 kwargs['obsmarkers'] = True
991 kwargs['obsmarkers'] = True
985 pullop.stepsdone.add('obsmarkers')
992 pullop.stepsdone.add('obsmarkers')
986 _pullbundle2extraprepare(pullop, kwargs)
993 _pullbundle2extraprepare(pullop, kwargs)
987 bundle = pullop.remote.getbundle('pull', **kwargs)
994 bundle = pullop.remote.getbundle('pull', **kwargs)
988 try:
995 try:
989 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
996 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
990 except error.BundleValueError, exc:
997 except error.BundleValueError, exc:
991 raise util.Abort('missing support for %s' % exc)
998 raise util.Abort('missing support for %s' % exc)
992
999
993 if pullop.fetch:
1000 if pullop.fetch:
994 results = [cg['return'] for cg in op.records['changegroup']]
1001 results = [cg['return'] for cg in op.records['changegroup']]
995 pullop.cgresult = changegroup.combineresults(results)
1002 pullop.cgresult = changegroup.combineresults(results)
996
1003
997 # processing phases change
1004 # processing phases change
998 for namespace, value in op.records['listkeys']:
1005 for namespace, value in op.records['listkeys']:
999 if namespace == 'phases':
1006 if namespace == 'phases':
1000 _pullapplyphases(pullop, value)
1007 _pullapplyphases(pullop, value)
1001
1008
1002 # processing bookmark update
1009 # processing bookmark update
1003 for namespace, value in op.records['listkeys']:
1010 for namespace, value in op.records['listkeys']:
1004 if namespace == 'bookmarks':
1011 if namespace == 'bookmarks':
1005 pullop.remotebookmarks = value
1012 pullop.remotebookmarks = value
1006 _pullbookmarks(pullop)
1013 _pullbookmarks(pullop)
1007
1014
1008 def _pullbundle2extraprepare(pullop, kwargs):
1015 def _pullbundle2extraprepare(pullop, kwargs):
1009 """hook function so that extensions can extend the getbundle call"""
1016 """hook function so that extensions can extend the getbundle call"""
1010 pass
1017 pass
1011
1018
1012 def _pullchangeset(pullop):
1019 def _pullchangeset(pullop):
1013 """pull changeset from unbundle into the local repo"""
1020 """pull changeset from unbundle into the local repo"""
1014 # We delay the open of the transaction as late as possible so we
1021 # We delay the open of the transaction as late as possible so we
1015 # don't open transaction for nothing or you break future useful
1022 # don't open transaction for nothing or you break future useful
1016 # rollback call
1023 # rollback call
1017 if 'changegroup' in pullop.stepsdone:
1024 if 'changegroup' in pullop.stepsdone:
1018 return
1025 return
1019 pullop.stepsdone.add('changegroup')
1026 pullop.stepsdone.add('changegroup')
1020 if not pullop.fetch:
1027 if not pullop.fetch:
1021 pullop.repo.ui.status(_("no changes found\n"))
1028 pullop.repo.ui.status(_("no changes found\n"))
1022 pullop.cgresult = 0
1029 pullop.cgresult = 0
1023 return
1030 return
1024 pullop.gettransaction()
1031 pullop.gettransaction()
1025 if pullop.heads is None and list(pullop.common) == [nullid]:
1032 if pullop.heads is None and list(pullop.common) == [nullid]:
1026 pullop.repo.ui.status(_("requesting all changes\n"))
1033 pullop.repo.ui.status(_("requesting all changes\n"))
1027 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1034 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1028 # issue1320, avoid a race if remote changed after discovery
1035 # issue1320, avoid a race if remote changed after discovery
1029 pullop.heads = pullop.rheads
1036 pullop.heads = pullop.rheads
1030
1037
1031 if pullop.remote.capable('getbundle'):
1038 if pullop.remote.capable('getbundle'):
1032 # TODO: get bundlecaps from remote
1039 # TODO: get bundlecaps from remote
1033 cg = pullop.remote.getbundle('pull', common=pullop.common,
1040 cg = pullop.remote.getbundle('pull', common=pullop.common,
1034 heads=pullop.heads or pullop.rheads)
1041 heads=pullop.heads or pullop.rheads)
1035 elif pullop.heads is None:
1042 elif pullop.heads is None:
1036 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1043 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1037 elif not pullop.remote.capable('changegroupsubset'):
1044 elif not pullop.remote.capable('changegroupsubset'):
1038 raise util.Abort(_("partial pull cannot be done because "
1045 raise util.Abort(_("partial pull cannot be done because "
1039 "other repository doesn't support "
1046 "other repository doesn't support "
1040 "changegroupsubset."))
1047 "changegroupsubset."))
1041 else:
1048 else:
1042 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1049 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1043 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1050 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1044 pullop.remote.url())
1051 pullop.remote.url())
1045
1052
1046 def _pullphase(pullop):
1053 def _pullphase(pullop):
1047 # Get remote phases data from remote
1054 # Get remote phases data from remote
1048 if 'phases' in pullop.stepsdone:
1055 if 'phases' in pullop.stepsdone:
1049 return
1056 return
1050 remotephases = pullop.remote.listkeys('phases')
1057 remotephases = pullop.remote.listkeys('phases')
1051 _pullapplyphases(pullop, remotephases)
1058 _pullapplyphases(pullop, remotephases)
1052
1059
1053 def _pullapplyphases(pullop, remotephases):
1060 def _pullapplyphases(pullop, remotephases):
1054 """apply phase movement from observed remote state"""
1061 """apply phase movement from observed remote state"""
1055 if 'phases' in pullop.stepsdone:
1062 if 'phases' in pullop.stepsdone:
1056 return
1063 return
1057 pullop.stepsdone.add('phases')
1064 pullop.stepsdone.add('phases')
1058 publishing = bool(remotephases.get('publishing', False))
1065 publishing = bool(remotephases.get('publishing', False))
1059 if remotephases and not publishing:
1066 if remotephases and not publishing:
1060 # remote is new and unpublishing
1067 # remote is new and unpublishing
1061 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1068 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1062 pullop.pulledsubset,
1069 pullop.pulledsubset,
1063 remotephases)
1070 remotephases)
1064 dheads = pullop.pulledsubset
1071 dheads = pullop.pulledsubset
1065 else:
1072 else:
1066 # Remote is old or publishing all common changesets
1073 # Remote is old or publishing all common changesets
1067 # should be seen as public
1074 # should be seen as public
1068 pheads = pullop.pulledsubset
1075 pheads = pullop.pulledsubset
1069 dheads = []
1076 dheads = []
1070 unfi = pullop.repo.unfiltered()
1077 unfi = pullop.repo.unfiltered()
1071 phase = unfi._phasecache.phase
1078 phase = unfi._phasecache.phase
1072 rev = unfi.changelog.nodemap.get
1079 rev = unfi.changelog.nodemap.get
1073 public = phases.public
1080 public = phases.public
1074 draft = phases.draft
1081 draft = phases.draft
1075
1082
1076 # exclude changesets already public locally and update the others
1083 # exclude changesets already public locally and update the others
1077 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1084 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1078 if pheads:
1085 if pheads:
1079 tr = pullop.gettransaction()
1086 tr = pullop.gettransaction()
1080 phases.advanceboundary(pullop.repo, tr, public, pheads)
1087 phases.advanceboundary(pullop.repo, tr, public, pheads)
1081
1088
1082 # exclude changesets already draft locally and update the others
1089 # exclude changesets already draft locally and update the others
1083 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1090 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1084 if dheads:
1091 if dheads:
1085 tr = pullop.gettransaction()
1092 tr = pullop.gettransaction()
1086 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1093 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1087
1094
1088 def _pullbookmarks(pullop):
1095 def _pullbookmarks(pullop):
1089 """process the remote bookmark information to update the local one"""
1096 """process the remote bookmark information to update the local one"""
1090 if 'bookmarks' in pullop.stepsdone:
1097 if 'bookmarks' in pullop.stepsdone:
1091 return
1098 return
1092 pullop.stepsdone.add('bookmarks')
1099 pullop.stepsdone.add('bookmarks')
1093 repo = pullop.repo
1100 repo = pullop.repo
1094 remotebookmarks = pullop.remotebookmarks
1101 remotebookmarks = pullop.remotebookmarks
1095 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1102 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1096 pullop.remote.url(),
1103 pullop.remote.url(),
1097 pullop.gettransaction,
1104 pullop.gettransaction,
1098 explicit=pullop.explicitbookmarks)
1105 explicit=pullop.explicitbookmarks)
1099
1106
1100 def _pullobsolete(pullop):
1107 def _pullobsolete(pullop):
1101 """utility function to pull obsolete markers from a remote
1108 """utility function to pull obsolete markers from a remote
1102
1109
1103 The `gettransaction` is function that return the pull transaction, creating
1110 The `gettransaction` is function that return the pull transaction, creating
1104 one if necessary. We return the transaction to inform the calling code that
1111 one if necessary. We return the transaction to inform the calling code that
1105 a new transaction have been created (when applicable).
1112 a new transaction have been created (when applicable).
1106
1113
1107 Exists mostly to allow overriding for experimentation purpose"""
1114 Exists mostly to allow overriding for experimentation purpose"""
1108 if 'obsmarkers' in pullop.stepsdone:
1115 if 'obsmarkers' in pullop.stepsdone:
1109 return
1116 return
1110 pullop.stepsdone.add('obsmarkers')
1117 pullop.stepsdone.add('obsmarkers')
1111 tr = None
1118 tr = None
1112 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1119 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1113 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1120 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1114 remoteobs = pullop.remote.listkeys('obsolete')
1121 remoteobs = pullop.remote.listkeys('obsolete')
1115 if 'dump0' in remoteobs:
1122 if 'dump0' in remoteobs:
1116 tr = pullop.gettransaction()
1123 tr = pullop.gettransaction()
1117 for key in sorted(remoteobs, reverse=True):
1124 for key in sorted(remoteobs, reverse=True):
1118 if key.startswith('dump'):
1125 if key.startswith('dump'):
1119 data = base85.b85decode(remoteobs[key])
1126 data = base85.b85decode(remoteobs[key])
1120 pullop.repo.obsstore.mergemarkers(tr, data)
1127 pullop.repo.obsstore.mergemarkers(tr, data)
1121 pullop.repo.invalidatevolatilesets()
1128 pullop.repo.invalidatevolatilesets()
1122 return tr
1129 return tr
1123
1130
1124 def caps20to10(repo):
1131 def caps20to10(repo):
1125 """return a set with appropriate options to use bundle20 during getbundle"""
1132 """return a set with appropriate options to use bundle20 during getbundle"""
1126 caps = set(['HG20'])
1133 caps = set(['HG20'])
1127 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1134 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1128 caps.add('bundle2=' + urllib.quote(capsblob))
1135 caps.add('bundle2=' + urllib.quote(capsblob))
1129 return caps
1136 return caps
1130
1137
1131 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1138 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1132 getbundle2partsorder = []
1139 getbundle2partsorder = []
1133
1140
1134 # Mapping between step name and function
1141 # Mapping between step name and function
1135 #
1142 #
1136 # This exists to help extensions wrap steps if necessary
1143 # This exists to help extensions wrap steps if necessary
1137 getbundle2partsmapping = {}
1144 getbundle2partsmapping = {}
1138
1145
1139 def getbundle2partsgenerator(stepname, idx=None):
1146 def getbundle2partsgenerator(stepname, idx=None):
1140 """decorator for function generating bundle2 part for getbundle
1147 """decorator for function generating bundle2 part for getbundle
1141
1148
1142 The function is added to the step -> function mapping and appended to the
1149 The function is added to the step -> function mapping and appended to the
1143 list of steps. Beware that decorated functions will be added in order
1150 list of steps. Beware that decorated functions will be added in order
1144 (this may matter).
1151 (this may matter).
1145
1152
1146 You can only use this decorator for new steps, if you want to wrap a step
1153 You can only use this decorator for new steps, if you want to wrap a step
1147 from an extension, attack the getbundle2partsmapping dictionary directly."""
1154 from an extension, attack the getbundle2partsmapping dictionary directly."""
1148 def dec(func):
1155 def dec(func):
1149 assert stepname not in getbundle2partsmapping
1156 assert stepname not in getbundle2partsmapping
1150 getbundle2partsmapping[stepname] = func
1157 getbundle2partsmapping[stepname] = func
1151 if idx is None:
1158 if idx is None:
1152 getbundle2partsorder.append(stepname)
1159 getbundle2partsorder.append(stepname)
1153 else:
1160 else:
1154 getbundle2partsorder.insert(idx, stepname)
1161 getbundle2partsorder.insert(idx, stepname)
1155 return func
1162 return func
1156 return dec
1163 return dec
1157
1164
1158 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1165 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1159 **kwargs):
1166 **kwargs):
1160 """return a full bundle (with potentially multiple kind of parts)
1167 """return a full bundle (with potentially multiple kind of parts)
1161
1168
1162 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1169 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1163 passed. For now, the bundle can contain only changegroup, but this will
1170 passed. For now, the bundle can contain only changegroup, but this will
1164 changes when more part type will be available for bundle2.
1171 changes when more part type will be available for bundle2.
1165
1172
1166 This is different from changegroup.getchangegroup that only returns an HG10
1173 This is different from changegroup.getchangegroup that only returns an HG10
1167 changegroup bundle. They may eventually get reunited in the future when we
1174 changegroup bundle. They may eventually get reunited in the future when we
1168 have a clearer idea of the API we what to query different data.
1175 have a clearer idea of the API we what to query different data.
1169
1176
1170 The implementation is at a very early stage and will get massive rework
1177 The implementation is at a very early stage and will get massive rework
1171 when the API of bundle is refined.
1178 when the API of bundle is refined.
1172 """
1179 """
1173 # bundle10 case
1180 # bundle10 case
1174 usebundle2 = False
1181 usebundle2 = False
1175 if bundlecaps is not None:
1182 if bundlecaps is not None:
1176 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1183 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1177 if not usebundle2:
1184 if not usebundle2:
1178 if bundlecaps and not kwargs.get('cg', True):
1185 if bundlecaps and not kwargs.get('cg', True):
1179 raise ValueError(_('request for bundle10 must include changegroup'))
1186 raise ValueError(_('request for bundle10 must include changegroup'))
1180
1187
1181 if kwargs:
1188 if kwargs:
1182 raise ValueError(_('unsupported getbundle arguments: %s')
1189 raise ValueError(_('unsupported getbundle arguments: %s')
1183 % ', '.join(sorted(kwargs.keys())))
1190 % ', '.join(sorted(kwargs.keys())))
1184 return changegroup.getchangegroup(repo, source, heads=heads,
1191 return changegroup.getchangegroup(repo, source, heads=heads,
1185 common=common, bundlecaps=bundlecaps)
1192 common=common, bundlecaps=bundlecaps)
1186
1193
1187 # bundle20 case
1194 # bundle20 case
1188 b2caps = {}
1195 b2caps = {}
1189 for bcaps in bundlecaps:
1196 for bcaps in bundlecaps:
1190 if bcaps.startswith('bundle2='):
1197 if bcaps.startswith('bundle2='):
1191 blob = urllib.unquote(bcaps[len('bundle2='):])
1198 blob = urllib.unquote(bcaps[len('bundle2='):])
1192 b2caps.update(bundle2.decodecaps(blob))
1199 b2caps.update(bundle2.decodecaps(blob))
1193 bundler = bundle2.bundle20(repo.ui, b2caps)
1200 bundler = bundle2.bundle20(repo.ui, b2caps)
1194
1201
1195 kwargs['heads'] = heads
1202 kwargs['heads'] = heads
1196 kwargs['common'] = common
1203 kwargs['common'] = common
1197
1204
1198 for name in getbundle2partsorder:
1205 for name in getbundle2partsorder:
1199 func = getbundle2partsmapping[name]
1206 func = getbundle2partsmapping[name]
1200 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1207 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1201 **kwargs)
1208 **kwargs)
1202
1209
1203 return util.chunkbuffer(bundler.getchunks())
1210 return util.chunkbuffer(bundler.getchunks())
1204
1211
1205 @getbundle2partsgenerator('changegroup')
1212 @getbundle2partsgenerator('changegroup')
1206 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1213 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1207 b2caps=None, heads=None, common=None, **kwargs):
1214 b2caps=None, heads=None, common=None, **kwargs):
1208 """add a changegroup part to the requested bundle"""
1215 """add a changegroup part to the requested bundle"""
1209 cg = None
1216 cg = None
1210 if kwargs.get('cg', True):
1217 if kwargs.get('cg', True):
1211 # build changegroup bundle here.
1218 # build changegroup bundle here.
1212 version = None
1219 version = None
1213 cgversions = b2caps.get('changegroup')
1220 cgversions = b2caps.get('changegroup')
1214 if not cgversions: # 3.1 and 3.2 ship with an empty value
1221 if not cgversions: # 3.1 and 3.2 ship with an empty value
1215 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1222 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1216 common=common,
1223 common=common,
1217 bundlecaps=bundlecaps)
1224 bundlecaps=bundlecaps)
1218 else:
1225 else:
1219 cgversions = [v for v in cgversions if v in changegroup.packermap]
1226 cgversions = [v for v in cgversions if v in changegroup.packermap]
1220 if not cgversions:
1227 if not cgversions:
1221 raise ValueError(_('no common changegroup version'))
1228 raise ValueError(_('no common changegroup version'))
1222 version = max(cgversions)
1229 version = max(cgversions)
1223 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1230 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1224 common=common,
1231 common=common,
1225 bundlecaps=bundlecaps,
1232 bundlecaps=bundlecaps,
1226 version=version)
1233 version=version)
1227
1234
1228 if cg:
1235 if cg:
1229 part = bundler.newpart('changegroup', data=cg)
1236 part = bundler.newpart('changegroup', data=cg)
1230 if version is not None:
1237 if version is not None:
1231 part.addparam('version', version)
1238 part.addparam('version', version)
1232
1239
1233 @getbundle2partsgenerator('listkeys')
1240 @getbundle2partsgenerator('listkeys')
1234 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1241 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1235 b2caps=None, **kwargs):
1242 b2caps=None, **kwargs):
1236 """add parts containing listkeys namespaces to the requested bundle"""
1243 """add parts containing listkeys namespaces to the requested bundle"""
1237 listkeys = kwargs.get('listkeys', ())
1244 listkeys = kwargs.get('listkeys', ())
1238 for namespace in listkeys:
1245 for namespace in listkeys:
1239 part = bundler.newpart('listkeys')
1246 part = bundler.newpart('listkeys')
1240 part.addparam('namespace', namespace)
1247 part.addparam('namespace', namespace)
1241 keys = repo.listkeys(namespace).items()
1248 keys = repo.listkeys(namespace).items()
1242 part.data = pushkey.encodekeys(keys)
1249 part.data = pushkey.encodekeys(keys)
1243
1250
1244 @getbundle2partsgenerator('obsmarkers')
1251 @getbundle2partsgenerator('obsmarkers')
1245 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1252 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1246 b2caps=None, heads=None, **kwargs):
1253 b2caps=None, heads=None, **kwargs):
1247 """add an obsolescence markers part to the requested bundle"""
1254 """add an obsolescence markers part to the requested bundle"""
1248 if kwargs.get('obsmarkers', False):
1255 if kwargs.get('obsmarkers', False):
1249 if heads is None:
1256 if heads is None:
1250 heads = repo.heads()
1257 heads = repo.heads()
1251 subset = [c.node() for c in repo.set('::%ln', heads)]
1258 subset = [c.node() for c in repo.set('::%ln', heads)]
1252 markers = repo.obsstore.relevantmarkers(subset)
1259 markers = repo.obsstore.relevantmarkers(subset)
1253 buildobsmarkerspart(bundler, markers)
1260 buildobsmarkerspart(bundler, markers)
1254
1261
1255 def check_heads(repo, their_heads, context):
1262 def check_heads(repo, their_heads, context):
1256 """check if the heads of a repo have been modified
1263 """check if the heads of a repo have been modified
1257
1264
1258 Used by peer for unbundling.
1265 Used by peer for unbundling.
1259 """
1266 """
1260 heads = repo.heads()
1267 heads = repo.heads()
1261 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1268 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1262 if not (their_heads == ['force'] or their_heads == heads or
1269 if not (their_heads == ['force'] or their_heads == heads or
1263 their_heads == ['hashed', heads_hash]):
1270 their_heads == ['hashed', heads_hash]):
1264 # someone else committed/pushed/unbundled while we
1271 # someone else committed/pushed/unbundled while we
1265 # were transferring data
1272 # were transferring data
1266 raise error.PushRaced('repository changed while %s - '
1273 raise error.PushRaced('repository changed while %s - '
1267 'please try again' % context)
1274 'please try again' % context)
1268
1275
1269 def unbundle(repo, cg, heads, source, url):
1276 def unbundle(repo, cg, heads, source, url):
1270 """Apply a bundle to a repo.
1277 """Apply a bundle to a repo.
1271
1278
1272 this function makes sure the repo is locked during the application and have
1279 this function makes sure the repo is locked during the application and have
1273 mechanism to check that no push race occurred between the creation of the
1280 mechanism to check that no push race occurred between the creation of the
1274 bundle and its application.
1281 bundle and its application.
1275
1282
1276 If the push was raced as PushRaced exception is raised."""
1283 If the push was raced as PushRaced exception is raised."""
1277 r = 0
1284 r = 0
1278 # need a transaction when processing a bundle2 stream
1285 # need a transaction when processing a bundle2 stream
1279 wlock = lock = tr = None
1286 wlock = lock = tr = None
1280 try:
1287 try:
1281 check_heads(repo, heads, 'uploading changes')
1288 check_heads(repo, heads, 'uploading changes')
1282 # push can proceed
1289 # push can proceed
1283 if util.safehasattr(cg, 'params'):
1290 if util.safehasattr(cg, 'params'):
1284 try:
1291 try:
1285 wlock = repo.wlock()
1292 wlock = repo.wlock()
1286 lock = repo.lock()
1293 lock = repo.lock()
1287 tr = repo.transaction(source)
1294 tr = repo.transaction(source)
1288 tr.hookargs['source'] = source
1295 tr.hookargs['source'] = source
1289 tr.hookargs['url'] = url
1296 tr.hookargs['url'] = url
1290 tr.hookargs['bundle2'] = '1'
1297 tr.hookargs['bundle2'] = '1'
1291 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1298 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1292 tr.close()
1299 tr.close()
1293 except Exception, exc:
1300 except Exception, exc:
1294 exc.duringunbundle2 = True
1301 exc.duringunbundle2 = True
1295 raise
1302 raise
1296 else:
1303 else:
1297 lock = repo.lock()
1304 lock = repo.lock()
1298 r = changegroup.addchangegroup(repo, cg, source, url)
1305 r = changegroup.addchangegroup(repo, cg, source, url)
1299 finally:
1306 finally:
1300 lockmod.release(tr, lock, wlock)
1307 lockmod.release(tr, lock, wlock)
1301 return r
1308 return r
@@ -1,112 +1,111 b''
1 $ cat > bundle2.py << EOF
1 $ cat > bundle2.py << EOF
2 > """A small extension to test bundle2 pushback parts.
2 > """A small extension to test bundle2 pushback parts.
3 > Current bundle2 implementation doesn't provide a way to generate those
3 > Current bundle2 implementation doesn't provide a way to generate those
4 > parts, so they must be created by extensions.
4 > parts, so they must be created by extensions.
5 > """
5 > """
6 > from mercurial import bundle2, pushkey, exchange, util
6 > from mercurial import bundle2, pushkey, exchange, util
7 > def _newhandlechangegroup(op, inpart):
7 > def _newhandlechangegroup(op, inpart):
8 > """This function wraps the changegroup part handler for getbundle.
8 > """This function wraps the changegroup part handler for getbundle.
9 > It issues an additional pushkey part to send a new
9 > It issues an additional pushkey part to send a new
10 > bookmark back to the client"""
10 > bookmark back to the client"""
11 > result = bundle2.handlechangegroup(op, inpart)
11 > result = bundle2.handlechangegroup(op, inpart)
12 > if 'pushback' in op.reply.capabilities:
12 > if 'pushback' in op.reply.capabilities:
13 > params = {'namespace': 'bookmarks',
13 > params = {'namespace': 'bookmarks',
14 > 'key': 'new-server-mark',
14 > 'key': 'new-server-mark',
15 > 'old': '',
15 > 'old': '',
16 > 'new': 'tip'}
16 > 'new': 'tip'}
17 > encodedparams = [(k, pushkey.encode(v)) for (k,v) in params.items()]
17 > encodedparams = [(k, pushkey.encode(v)) for (k,v) in params.items()]
18 > op.reply.newpart('pushkey', mandatoryparams=encodedparams)
18 > op.reply.newpart('pushkey', mandatoryparams=encodedparams)
19 > else:
19 > else:
20 > op.reply.newpart('output', data='pushback not enabled')
20 > op.reply.newpart('output', data='pushback not enabled')
21 > return result
21 > return result
22 > _newhandlechangegroup.params = bundle2.handlechangegroup.params
22 > _newhandlechangegroup.params = bundle2.handlechangegroup.params
23 > bundle2.parthandlermapping['changegroup'] = _newhandlechangegroup
23 > bundle2.parthandlermapping['changegroup'] = _newhandlechangegroup
24 > EOF
24 > EOF
25
25
26 $ cat >> $HGRCPATH <<EOF
26 $ cat >> $HGRCPATH <<EOF
27 > [ui]
27 > [ui]
28 > ssh = python "$TESTDIR/dummyssh"
28 > ssh = python "$TESTDIR/dummyssh"
29 > username = nobody <no.reply@example.com>
29 > username = nobody <no.reply@example.com>
30 >
30 >
31 > [alias]
31 > [alias]
32 > tglog = log -G -T "{desc} [{phase}:{node|short}]"
32 > tglog = log -G -T "{desc} [{phase}:{node|short}]"
33 > EOF
33 > EOF
34
34
35 Set up server repository
35 Set up server repository
36
36
37 $ hg init server
37 $ hg init server
38 $ cd server
38 $ cd server
39 $ echo c0 > f0
39 $ echo c0 > f0
40 $ hg commit -Am 0
40 $ hg commit -Am 0
41 adding f0
41 adding f0
42
42
43 Set up client repository
43 Set up client repository
44
44
45 $ cd ..
45 $ cd ..
46 $ hg clone ssh://user@dummy/server client -q
46 $ hg clone ssh://user@dummy/server client -q
47 $ cd client
47 $ cd client
48
48
49 Enable extension
49 Enable extension
50 $ cat >> $HGRCPATH <<EOF
50 $ cat >> $HGRCPATH <<EOF
51 > [extensions]
51 > [extensions]
52 > bundle2=$TESTTMP/bundle2.py
52 > bundle2=$TESTTMP/bundle2.py
53 > [experimental]
53 > [experimental]
54 > bundle2-exp = True
54 > bundle2-exp = True
55 > EOF
55 > EOF
56
56
57 Without config
57 Without config
58
58
59 $ cd ../client
59 $ cd ../client
60 $ echo c1 > f1
60 $ echo c1 > f1
61 $ hg commit -Am 1
61 $ hg commit -Am 1
62 adding f1
62 adding f1
63 $ hg push
63 $ hg push
64 pushing to ssh://user@dummy/server
64 pushing to ssh://user@dummy/server
65 searching for changes
65 searching for changes
66 remote: pushback not enabled
66 remote: pushback not enabled
67 remote: adding changesets
67 remote: adding changesets
68 remote: adding manifests
68 remote: adding manifests
69 remote: adding file changes
69 remote: adding file changes
70 remote: added 1 changesets with 1 changes to 1 files
70 remote: added 1 changesets with 1 changes to 1 files
71 $ hg bookmark
71 $ hg bookmark
72 no bookmarks set
72 no bookmarks set
73
73
74 $ cd ../server
74 $ cd ../server
75 $ hg tglog
75 $ hg tglog
76 o 1 [public:2b9c7234e035]
76 o 1 [public:2b9c7234e035]
77 |
77 |
78 @ 0 [public:6cee5c8f3e5b]
78 @ 0 [public:6cee5c8f3e5b]
79
79
80
80
81
81
82
82
83 With config
83 With config
84
84
85 $ cd ../client
85 $ cd ../client
86 $ echo '[experimental]' >> .hg/hgrc
86 $ echo '[experimental]' >> .hg/hgrc
87 $ echo 'bundle2.pushback = True' >> .hg/hgrc
87 $ echo 'bundle2.pushback = True' >> .hg/hgrc
88 $ echo c2 > f2
88 $ echo c2 > f2
89 $ hg commit -Am 2
89 $ hg commit -Am 2
90 adding f2
90 adding f2
91 $ hg push
91 $ hg push
92 pushing to ssh://user@dummy/server
92 pushing to ssh://user@dummy/server
93 searching for changes
93 searching for changes
94 "wlock" acquired after "lock" at: */mercurial/bookmarks.py:259 (pushbookmark) (glob)
95 remote: adding changesets
94 remote: adding changesets
96 remote: adding manifests
95 remote: adding manifests
97 remote: adding file changes
96 remote: adding file changes
98 remote: added 1 changesets with 1 changes to 1 files
97 remote: added 1 changesets with 1 changes to 1 files
99 $ hg bookmark
98 $ hg bookmark
100 new-server-mark 2:0a76dfb2e179
99 new-server-mark 2:0a76dfb2e179
101
100
102 $ cd ../server
101 $ cd ../server
103 $ hg tglog
102 $ hg tglog
104 o 2 [public:0a76dfb2e179]
103 o 2 [public:0a76dfb2e179]
105 |
104 |
106 o 1 [public:2b9c7234e035]
105 o 1 [public:2b9c7234e035]
107 |
106 |
108 @ 0 [public:6cee5c8f3e5b]
107 @ 0 [public:6cee5c8f3e5b]
109
108
110
109
111
110
112
111
General Comments 0
You need to be logged in to leave comments. Login now