# Mercurial changeset r23848 (default branch), Pierre-Yves David:
#   "discovery: run discovery on filtered repository"
# Diff hunk: @@ -1,1294 +1,1318 @@ of mercurial/exchange.py
# (The scraped diff below interleaves old/new copies of each line.)
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13
13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff a bundle stream's header and return the matching unpacker.

    ``fh`` is the stream to read, ``fname`` a display name for error
    messages ("stream" when absent, which also enables headerless-stdin
    fixup).  ``vfs``, when given, is used to resolve ``fname`` into a
    full path for error reporting.

    Raises util.Abort for non-Mercurial data or an unknown bundle
    version.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        # A raw, headerless changegroup piped in starts with a NUL byte;
        # wrap it so it parses like an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # the two bytes after the magic name the compression scheme
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version == '2Y':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
def buildobsmarkerspart(bundler, markers):
    """Append a 'b2x:obsmarkers' part carrying <markers> to ``bundler``.

    Returns the freshly created part, or None when ``markers`` is empty
    (in which case no part is added at all).

    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('b2x:obsmarkers', data=stream)
54
54
class pushoperation(object):
    """State holder for a single push operation.

    Carries all push-related state and the most common derived values.
    A fresh instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no explicit push target: every common head stays relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out).
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # Discovery already computed:
        #     common  = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        # so we assemble the result from two cheap pieces:
        # * the pushed revs that are already common (in ::commonheads)
        common = set(self.outgoing.common)
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in common]
        # * the common parents of the missing roots
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads

    # messages used when pushing bookmarks: action -> (success, failure)
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
163
163
164
164
165 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
165 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
166 '''Push outgoing changesets (limited by revs) from a local
166 '''Push outgoing changesets (limited by revs) from a local
167 repository to remote. Return an integer:
167 repository to remote. Return an integer:
168 - None means nothing to push
168 - None means nothing to push
169 - 0 means HTTP error
169 - 0 means HTTP error
170 - 1 means we pushed and remote head count is unchanged *or*
170 - 1 means we pushed and remote head count is unchanged *or*
171 we have outgoing changesets but refused to push
171 we have outgoing changesets but refused to push
172 - other values as described by addchangegroup()
172 - other values as described by addchangegroup()
173 '''
173 '''
174 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
174 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
175 if pushop.remote.local():
175 if pushop.remote.local():
176 missing = (set(pushop.repo.requirements)
176 missing = (set(pushop.repo.requirements)
177 - pushop.remote.local().supported)
177 - pushop.remote.local().supported)
178 if missing:
178 if missing:
179 msg = _("required features are not"
179 msg = _("required features are not"
180 " supported in the destination:"
180 " supported in the destination:"
181 " %s") % (', '.join(sorted(missing)))
181 " %s") % (', '.join(sorted(missing)))
182 raise util.Abort(msg)
182 raise util.Abort(msg)
183
183
184 # there are two ways to push to remote repo:
184 # there are two ways to push to remote repo:
185 #
185 #
186 # addchangegroup assumes local user can lock remote
186 # addchangegroup assumes local user can lock remote
187 # repo (local filesystem, old ssh servers).
187 # repo (local filesystem, old ssh servers).
188 #
188 #
189 # unbundle assumes local user cannot lock remote repo (new ssh
189 # unbundle assumes local user cannot lock remote repo (new ssh
190 # servers, http servers).
190 # servers, http servers).
191
191
192 if not pushop.remote.canpush():
192 if not pushop.remote.canpush():
193 raise util.Abort(_("destination does not support push"))
193 raise util.Abort(_("destination does not support push"))
194 # get local lock as we might write phase data
194 # get local lock as we might write phase data
195 locallock = None
195 locallock = None
196 try:
196 try:
197 locallock = pushop.repo.lock()
197 locallock = pushop.repo.lock()
198 pushop.locallocked = True
198 pushop.locallocked = True
199 except IOError, err:
199 except IOError, err:
200 pushop.locallocked = False
200 pushop.locallocked = False
201 if err.errno != errno.EACCES:
201 if err.errno != errno.EACCES:
202 raise
202 raise
203 # source repo cannot be locked.
203 # source repo cannot be locked.
204 # We do not abort the push, but just disable the local phase
204 # We do not abort the push, but just disable the local phase
205 # synchronisation.
205 # synchronisation.
206 msg = 'cannot lock source repository: %s\n' % err
206 msg = 'cannot lock source repository: %s\n' % err
207 pushop.ui.debug(msg)
207 pushop.ui.debug(msg)
208 try:
208 try:
209 if pushop.locallocked:
209 if pushop.locallocked:
210 pushop.trmanager = transactionmanager(repo,
210 pushop.trmanager = transactionmanager(repo,
211 'push-response',
211 'push-response',
212 pushop.remote.url())
212 pushop.remote.url())
213 pushop.repo.checkpush(pushop)
213 pushop.repo.checkpush(pushop)
214 lock = None
214 lock = None
215 unbundle = pushop.remote.capable('unbundle')
215 unbundle = pushop.remote.capable('unbundle')
216 if not unbundle:
216 if not unbundle:
217 lock = pushop.remote.lock()
217 lock = pushop.remote.lock()
218 try:
218 try:
219 _pushdiscovery(pushop)
219 _pushdiscovery(pushop)
220 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
220 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
221 False)
221 False)
222 and pushop.remote.capable('bundle2-exp')):
222 and pushop.remote.capable('bundle2-exp')):
223 _pushbundle2(pushop)
223 _pushbundle2(pushop)
224 _pushchangeset(pushop)
224 _pushchangeset(pushop)
225 _pushsyncphase(pushop)
225 _pushsyncphase(pushop)
226 _pushobsolete(pushop)
226 _pushobsolete(pushop)
227 _pushbookmark(pushop)
227 _pushbookmark(pushop)
228 finally:
228 finally:
229 if lock is not None:
229 if lock is not None:
230 lock.release()
230 lock.release()
231 if pushop.trmanager:
231 if pushop.trmanager:
232 pushop.trmanager.close()
232 pushop.trmanager.close()
233 finally:
233 finally:
234 if pushop.trmanager:
234 if pushop.trmanager:
235 pushop.trmanager.release()
235 pushop.trmanager.release()
236 if locallock is not None:
236 if locallock is not None:
237 locallock.release()
237 locallock.release()
238
238
239 return pushop
239 return pushop
240
240
# list of steps to perform discovery before push, in run order
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The decorated function is recorded under ``stepname`` and appended to
    the ordered step list, so registration order is significant (steps run
    in the order they were declared).

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(stepfunc):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = stepfunc
        pushdiscoveryorder.append(stepname)
        return stepfunc
    return register
264
264
def _pushdiscovery(pushop):
    """Run every registered discovery step on ``pushop``, in order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
270
270
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Figure out which changesets need to be pushed.

    Runs discovery against the (filtered) repository so hidden changesets
    never take part in the exchange.  Fills in ``pushop.outgoing``,
    ``pushop.remoteheads`` and ``pushop.incoming``.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
284
283
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    pheads, droots = phases.analyzeremotephases(pushop.repo,
                                                pushop.fallbackheads,
                                                remotephases)
    # against a non-publishing server only public changesets need fixing
    extracond = '' if publishing else ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
319
318
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the pushed heads."""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
330
329
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and queue the ones to push.

    Fills ``pushop.outbookmarks`` with (name, remote_id, new_id) triples
    and sets ``pushop.bkresult`` to 2 when an explicitly requested
    bookmark exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to ancestors of the pushed revs
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user explicitly asked to push
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that moved forward locally
    for b, scid, dcid in advsrc:
        explicit.discard(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        explicit.discard(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        explicit.discard(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        explicit.discard(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        explicit.discard(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
381
380
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets; return True when a push is needed.

    Returns False (after reporting "no changes found") when there is
    nothing to push.  Unless --force is in effect, aborts on outgoing
    obsolete/troubled changesets and runs the remote head checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # short names keep these messages within the 80 char limit
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If there is at least one obsolete or unstable changeset in
            # missing, at least one of the missing heads will be obsolete
            # or unstable too, so checking heads only is enough.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                if ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
416
415
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator for function generating bundle2 part

    The decorated function is recorded under ``stepname`` and appended to
    the ordered step list, so registration order is significant (parts are
    generated in the order they were declared).

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(partgen):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = partgen
        b2partsgenorder.append(stepname)
        return partgen
    return register
440
439
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    The addchangegroup result ends up in ``pushop.cgresult`` once the
    server reply is processed by the returned handler.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    # Send known heads to the server for race detection.
    if not pushop.force:
        bundler.newpart('b2x:check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('b2x:changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # keep only the versions both sides can speak, pick the newest
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('b2x:changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        replies = op.records.getreplies(cgpart.id)
        assert len(replies['changegroup']) == 1
        pushop.cgresult = replies['changegroup'][0]['return']
    return handlereply
481
480
482 @b2partsgenerator('phase')
481 @b2partsgenerator('phase')
483 def _pushb2phases(pushop, bundler):
482 def _pushb2phases(pushop, bundler):
484 """handle phase push through bundle2"""
483 """handle phase push through bundle2"""
485 if 'phases' in pushop.stepsdone:
484 if 'phases' in pushop.stepsdone:
486 return
485 return
487 b2caps = bundle2.bundle2caps(pushop.remote)
486 b2caps = bundle2.bundle2caps(pushop.remote)
488 if not 'b2x:pushkey' in b2caps:
487 if not 'b2x:pushkey' in b2caps:
489 return
488 return
490 pushop.stepsdone.add('phases')
489 pushop.stepsdone.add('phases')
491 part2node = []
490 part2node = []
492 enc = pushkey.encode
491 enc = pushkey.encode
493 for newremotehead in pushop.outdatedphases:
492 for newremotehead in pushop.outdatedphases:
494 part = bundler.newpart('b2x:pushkey')
493 part = bundler.newpart('b2x:pushkey')
495 part.addparam('namespace', enc('phases'))
494 part.addparam('namespace', enc('phases'))
496 part.addparam('key', enc(newremotehead.hex()))
495 part.addparam('key', enc(newremotehead.hex()))
497 part.addparam('old', enc(str(phases.draft)))
496 part.addparam('old', enc(str(phases.draft)))
498 part.addparam('new', enc(str(phases.public)))
497 part.addparam('new', enc(str(phases.public)))
499 part2node.append((part.id, newremotehead))
498 part2node.append((part.id, newremotehead))
500 def handlereply(op):
499 def handlereply(op):
501 for partid, node in part2node:
500 for partid, node in part2node:
502 partrep = op.records.getreplies(partid)
501 partrep = op.records.getreplies(partid)
503 results = partrep['pushkey']
502 results = partrep['pushkey']
504 assert len(results) <= 1
503 assert len(results) <= 1
505 msg = None
504 msg = None
506 if not results:
505 if not results:
507 msg = _('server ignored update of %s to public!\n') % node
506 msg = _('server ignored update of %s to public!\n') % node
508 elif not int(results[0]['return']):
507 elif not int(results[0]['return']):
509 msg = _('updating %s to public failed!\n') % node
508 msg = _('updating %s to public failed!\n') % node
510 if msg is not None:
509 if msg is not None:
511 pushop.ui.warn(msg)
510 pushop.ui.warn(msg)
512 return handlereply
511 return handlereply
513
512
514 @b2partsgenerator('obsmarkers')
513 @b2partsgenerator('obsmarkers')
515 def _pushb2obsmarkers(pushop, bundler):
514 def _pushb2obsmarkers(pushop, bundler):
516 if 'obsmarkers' in pushop.stepsdone:
515 if 'obsmarkers' in pushop.stepsdone:
517 return
516 return
518 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
517 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
519 if obsolete.commonversion(remoteversions) is None:
518 if obsolete.commonversion(remoteversions) is None:
520 return
519 return
521 pushop.stepsdone.add('obsmarkers')
520 pushop.stepsdone.add('obsmarkers')
522 if pushop.outobsmarkers:
521 if pushop.outobsmarkers:
523 buildobsmarkerspart(bundler, pushop.outobsmarkers)
522 buildobsmarkerspart(bundler, pushop.outobsmarkers)
524
523
525 @b2partsgenerator('bookmarks')
524 @b2partsgenerator('bookmarks')
526 def _pushb2bookmarks(pushop, bundler):
525 def _pushb2bookmarks(pushop, bundler):
527 """handle phase push through bundle2"""
526 """handle phase push through bundle2"""
528 if 'bookmarks' in pushop.stepsdone:
527 if 'bookmarks' in pushop.stepsdone:
529 return
528 return
530 b2caps = bundle2.bundle2caps(pushop.remote)
529 b2caps = bundle2.bundle2caps(pushop.remote)
531 if 'b2x:pushkey' not in b2caps:
530 if 'b2x:pushkey' not in b2caps:
532 return
531 return
533 pushop.stepsdone.add('bookmarks')
532 pushop.stepsdone.add('bookmarks')
534 part2book = []
533 part2book = []
535 enc = pushkey.encode
534 enc = pushkey.encode
536 for book, old, new in pushop.outbookmarks:
535 for book, old, new in pushop.outbookmarks:
537 part = bundler.newpart('b2x:pushkey')
536 part = bundler.newpart('b2x:pushkey')
538 part.addparam('namespace', enc('bookmarks'))
537 part.addparam('namespace', enc('bookmarks'))
539 part.addparam('key', enc(book))
538 part.addparam('key', enc(book))
540 part.addparam('old', enc(old))
539 part.addparam('old', enc(old))
541 part.addparam('new', enc(new))
540 part.addparam('new', enc(new))
542 action = 'update'
541 action = 'update'
543 if not old:
542 if not old:
544 action = 'export'
543 action = 'export'
545 elif not new:
544 elif not new:
546 action = 'delete'
545 action = 'delete'
547 part2book.append((part.id, book, action))
546 part2book.append((part.id, book, action))
548
547
549
548
550 def handlereply(op):
549 def handlereply(op):
551 ui = pushop.ui
550 ui = pushop.ui
552 for partid, book, action in part2book:
551 for partid, book, action in part2book:
553 partrep = op.records.getreplies(partid)
552 partrep = op.records.getreplies(partid)
554 results = partrep['pushkey']
553 results = partrep['pushkey']
555 assert len(results) <= 1
554 assert len(results) <= 1
556 if not results:
555 if not results:
557 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
556 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
558 else:
557 else:
559 ret = int(results[0]['return'])
558 ret = int(results[0]['return'])
560 if ret:
559 if ret:
561 ui.status(bookmsgmap[action][0] % book)
560 ui.status(bookmsgmap[action][0] % book)
562 else:
561 else:
563 ui.warn(bookmsgmap[action][1] % book)
562 ui.warn(bookmsgmap[action][1] % book)
564 if pushop.bkresult is not None:
563 if pushop.bkresult is not None:
565 pushop.bkresult = 1
564 pushop.bkresult = 1
566 return handlereply
565 return handlereply
567
566
568
567
569 def _pushbundle2(pushop):
568 def _pushbundle2(pushop):
570 """push data to the remote using bundle2
569 """push data to the remote using bundle2
571
570
572 The only currently supported type of data is changegroup but this will
571 The only currently supported type of data is changegroup but this will
573 evolve in the future."""
572 evolve in the future."""
574 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
573 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
575 pushback = (pushop.trmanager
574 pushback = (pushop.trmanager
576 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
575 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
577
576
578 # create reply capability
577 # create reply capability
579 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
578 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
580 allowpushback=pushback))
579 allowpushback=pushback))
581 bundler.newpart('b2x:replycaps', data=capsblob)
580 bundler.newpart('b2x:replycaps', data=capsblob)
582 replyhandlers = []
581 replyhandlers = []
583 for partgenname in b2partsgenorder:
582 for partgenname in b2partsgenorder:
584 partgen = b2partsgenmapping[partgenname]
583 partgen = b2partsgenmapping[partgenname]
585 ret = partgen(pushop, bundler)
584 ret = partgen(pushop, bundler)
586 if callable(ret):
585 if callable(ret):
587 replyhandlers.append(ret)
586 replyhandlers.append(ret)
588 # do not push if nothing to push
587 # do not push if nothing to push
589 if bundler.nbparts <= 1:
588 if bundler.nbparts <= 1:
590 return
589 return
591 stream = util.chunkbuffer(bundler.getchunks())
590 stream = util.chunkbuffer(bundler.getchunks())
592 try:
591 try:
593 reply = pushop.remote.unbundle(stream, ['force'], 'push')
592 reply = pushop.remote.unbundle(stream, ['force'], 'push')
594 except error.BundleValueError, exc:
593 except error.BundleValueError, exc:
595 raise util.Abort('missing support for %s' % exc)
594 raise util.Abort('missing support for %s' % exc)
596 try:
595 try:
597 trgetter = None
596 trgetter = None
598 if pushback:
597 if pushback:
599 trgetter = pushop.trmanager.transaction
598 trgetter = pushop.trmanager.transaction
600 op = bundle2.processbundle(pushop.repo, reply, trgetter)
599 op = bundle2.processbundle(pushop.repo, reply, trgetter)
601 except error.BundleValueError, exc:
600 except error.BundleValueError, exc:
602 raise util.Abort('missing support for %s' % exc)
601 raise util.Abort('missing support for %s' % exc)
603 for rephand in replyhandlers:
602 for rephand in replyhandlers:
604 rephand(op)
603 rephand(op)
605
604
606 def _pushchangeset(pushop):
605 def _pushchangeset(pushop):
607 """Make the actual push of changeset bundle to remote repo"""
606 """Make the actual push of changeset bundle to remote repo"""
608 if 'changesets' in pushop.stepsdone:
607 if 'changesets' in pushop.stepsdone:
609 return
608 return
610 pushop.stepsdone.add('changesets')
609 pushop.stepsdone.add('changesets')
611 if not _pushcheckoutgoing(pushop):
610 if not _pushcheckoutgoing(pushop):
612 return
611 return
613 pushop.repo.prepushoutgoinghooks(pushop.repo,
612 pushop.repo.prepushoutgoinghooks(pushop.repo,
614 pushop.remote,
613 pushop.remote,
615 pushop.outgoing)
614 pushop.outgoing)
616 outgoing = pushop.outgoing
615 outgoing = pushop.outgoing
617 unbundle = pushop.remote.capable('unbundle')
616 unbundle = pushop.remote.capable('unbundle')
618 # TODO: get bundlecaps from remote
617 # TODO: get bundlecaps from remote
619 bundlecaps = None
618 bundlecaps = None
620 # create a changegroup from local
619 # create a changegroup from local
621 if pushop.revs is None and not (outgoing.excluded
620 if pushop.revs is None and not (outgoing.excluded
622 or pushop.repo.changelog.filteredrevs):
621 or pushop.repo.changelog.filteredrevs):
623 # push everything,
622 # push everything,
624 # use the fast path, no race possible on push
623 # use the fast path, no race possible on push
625 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
624 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
626 cg = changegroup.getsubset(pushop.repo,
625 cg = changegroup.getsubset(pushop.repo,
627 outgoing,
626 outgoing,
628 bundler,
627 bundler,
629 'push',
628 'push',
630 fastpath=True)
629 fastpath=True)
631 else:
630 else:
632 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
631 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
633 bundlecaps)
632 bundlecaps)
634
633
635 # apply changegroup to remote
634 # apply changegroup to remote
636 if unbundle:
635 if unbundle:
637 # local repo finds heads on server, finds out what
636 # local repo finds heads on server, finds out what
638 # revs it must push. once revs transferred, if server
637 # revs it must push. once revs transferred, if server
639 # finds it has different heads (someone else won
638 # finds it has different heads (someone else won
640 # commit/push race), server aborts.
639 # commit/push race), server aborts.
641 if pushop.force:
640 if pushop.force:
642 remoteheads = ['force']
641 remoteheads = ['force']
643 else:
642 else:
644 remoteheads = pushop.remoteheads
643 remoteheads = pushop.remoteheads
645 # ssh: return remote's addchangegroup()
644 # ssh: return remote's addchangegroup()
646 # http: return remote's addchangegroup() or 0 for error
645 # http: return remote's addchangegroup() or 0 for error
647 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
646 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
648 pushop.repo.url())
647 pushop.repo.url())
649 else:
648 else:
650 # we return an integer indicating remote head count
649 # we return an integer indicating remote head count
651 # change
650 # change
652 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
651 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
653 pushop.repo.url())
652 pushop.repo.url())
654
653
655 def _pushsyncphase(pushop):
654 def _pushsyncphase(pushop):
656 """synchronise phase information locally and remotely"""
655 """synchronise phase information locally and remotely"""
657 cheads = pushop.commonheads
656 cheads = pushop.commonheads
658 # even when we don't push, exchanging phase data is useful
657 # even when we don't push, exchanging phase data is useful
659 remotephases = pushop.remote.listkeys('phases')
658 remotephases = pushop.remote.listkeys('phases')
660 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
659 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
661 and remotephases # server supports phases
660 and remotephases # server supports phases
662 and pushop.cgresult is None # nothing was pushed
661 and pushop.cgresult is None # nothing was pushed
663 and remotephases.get('publishing', False)):
662 and remotephases.get('publishing', False)):
664 # When:
663 # When:
665 # - this is a subrepo push
664 # - this is a subrepo push
666 # - and remote support phase
665 # - and remote support phase
667 # - and no changeset was pushed
666 # - and no changeset was pushed
668 # - and remote is publishing
667 # - and remote is publishing
669 # We may be in issue 3871 case!
668 # We may be in issue 3871 case!
670 # We drop the possible phase synchronisation done by
669 # We drop the possible phase synchronisation done by
671 # courtesy to publish changesets possibly locally draft
670 # courtesy to publish changesets possibly locally draft
672 # on the remote.
671 # on the remote.
673 remotephases = {'publishing': 'True'}
672 remotephases = {'publishing': 'True'}
674 if not remotephases: # old server or public only reply from non-publishing
673 if not remotephases: # old server or public only reply from non-publishing
675 _localphasemove(pushop, cheads)
674 _localphasemove(pushop, cheads)
676 # don't push any phase data as there is nothing to push
675 # don't push any phase data as there is nothing to push
677 else:
676 else:
678 ana = phases.analyzeremotephases(pushop.repo, cheads,
677 ana = phases.analyzeremotephases(pushop.repo, cheads,
679 remotephases)
678 remotephases)
680 pheads, droots = ana
679 pheads, droots = ana
681 ### Apply remote phase on local
680 ### Apply remote phase on local
682 if remotephases.get('publishing', False):
681 if remotephases.get('publishing', False):
683 _localphasemove(pushop, cheads)
682 _localphasemove(pushop, cheads)
684 else: # publish = False
683 else: # publish = False
685 _localphasemove(pushop, pheads)
684 _localphasemove(pushop, pheads)
686 _localphasemove(pushop, cheads, phases.draft)
685 _localphasemove(pushop, cheads, phases.draft)
687 ### Apply local phase on remote
686 ### Apply local phase on remote
688
687
689 if pushop.cgresult:
688 if pushop.cgresult:
690 if 'phases' in pushop.stepsdone:
689 if 'phases' in pushop.stepsdone:
691 # phases already pushed though bundle2
690 # phases already pushed though bundle2
692 return
691 return
693 outdated = pushop.outdatedphases
692 outdated = pushop.outdatedphases
694 else:
693 else:
695 outdated = pushop.fallbackoutdatedphases
694 outdated = pushop.fallbackoutdatedphases
696
695
697 pushop.stepsdone.add('phases')
696 pushop.stepsdone.add('phases')
698
697
699 # filter heads already turned public by the push
698 # filter heads already turned public by the push
700 outdated = [c for c in outdated if c.node() not in pheads]
699 outdated = [c for c in outdated if c.node() not in pheads]
701 # fallback to independent pushkey command
700 # fallback to independent pushkey command
702 for newremotehead in outdated:
701 for newremotehead in outdated:
703 r = pushop.remote.pushkey('phases',
702 r = pushop.remote.pushkey('phases',
704 newremotehead.hex(),
703 newremotehead.hex(),
705 str(phases.draft),
704 str(phases.draft),
706 str(phases.public))
705 str(phases.public))
707 if not r:
706 if not r:
708 pushop.ui.warn(_('updating %s to public failed!\n')
707 pushop.ui.warn(_('updating %s to public failed!\n')
709 % newremotehead)
708 % newremotehead)
710
709
711 def _localphasemove(pushop, nodes, phase=phases.public):
710 def _localphasemove(pushop, nodes, phase=phases.public):
712 """move <nodes> to <phase> in the local source repo"""
711 """move <nodes> to <phase> in the local source repo"""
713 if pushop.trmanager:
712 if pushop.trmanager:
714 phases.advanceboundary(pushop.repo,
713 phases.advanceboundary(pushop.repo,
715 pushop.trmanager.transaction(),
714 pushop.trmanager.transaction(),
716 phase,
715 phase,
717 nodes)
716 nodes)
718 else:
717 else:
719 # repo is not locked, do not change any phases!
718 # repo is not locked, do not change any phases!
720 # Informs the user that phases should have been moved when
719 # Informs the user that phases should have been moved when
721 # applicable.
720 # applicable.
722 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
721 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
723 phasestr = phases.phasenames[phase]
722 phasestr = phases.phasenames[phase]
724 if actualmoves:
723 if actualmoves:
725 pushop.ui.status(_('cannot lock source repo, skipping '
724 pushop.ui.status(_('cannot lock source repo, skipping '
726 'local %s phase update\n') % phasestr)
725 'local %s phase update\n') % phasestr)
727
726
728 def _pushobsolete(pushop):
727 def _pushobsolete(pushop):
729 """utility function to push obsolete markers to a remote"""
728 """utility function to push obsolete markers to a remote"""
730 if 'obsmarkers' in pushop.stepsdone:
729 if 'obsmarkers' in pushop.stepsdone:
731 return
730 return
732 pushop.ui.debug('try to push obsolete markers to remote\n')
731 pushop.ui.debug('try to push obsolete markers to remote\n')
733 repo = pushop.repo
732 repo = pushop.repo
734 remote = pushop.remote
733 remote = pushop.remote
735 pushop.stepsdone.add('obsmarkers')
734 pushop.stepsdone.add('obsmarkers')
736 if pushop.outobsmarkers:
735 if pushop.outobsmarkers:
737 rslts = []
736 rslts = []
738 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
737 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
739 for key in sorted(remotedata, reverse=True):
738 for key in sorted(remotedata, reverse=True):
740 # reverse sort to ensure we end with dump0
739 # reverse sort to ensure we end with dump0
741 data = remotedata[key]
740 data = remotedata[key]
742 rslts.append(remote.pushkey('obsolete', key, '', data))
741 rslts.append(remote.pushkey('obsolete', key, '', data))
743 if [r for r in rslts if not r]:
742 if [r for r in rslts if not r]:
744 msg = _('failed to push some obsolete markers!\n')
743 msg = _('failed to push some obsolete markers!\n')
745 repo.ui.warn(msg)
744 repo.ui.warn(msg)
746
745
747 def _pushbookmark(pushop):
746 def _pushbookmark(pushop):
748 """Update bookmark position on remote"""
747 """Update bookmark position on remote"""
749 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
748 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
750 return
749 return
751 pushop.stepsdone.add('bookmarks')
750 pushop.stepsdone.add('bookmarks')
752 ui = pushop.ui
751 ui = pushop.ui
753 remote = pushop.remote
752 remote = pushop.remote
754
753
755 for b, old, new in pushop.outbookmarks:
754 for b, old, new in pushop.outbookmarks:
756 action = 'update'
755 action = 'update'
757 if not old:
756 if not old:
758 action = 'export'
757 action = 'export'
759 elif not new:
758 elif not new:
760 action = 'delete'
759 action = 'delete'
761 if remote.pushkey('bookmarks', b, old, new):
760 if remote.pushkey('bookmarks', b, old, new):
762 ui.status(bookmsgmap[action][0] % b)
761 ui.status(bookmsgmap[action][0] % b)
763 else:
762 else:
764 ui.warn(bookmsgmap[action][1] % b)
763 ui.warn(bookmsgmap[action][1] % b)
765 # discovery can have set the value form invalid entry
764 # discovery can have set the value form invalid entry
766 if pushop.bkresult is not None:
765 if pushop.bkresult is not None:
767 pushop.bkresult = 1
766 pushop.bkresult = 1
768
767
769 class pulloperation(object):
768 class pulloperation(object):
770 """A object that represent a single pull operation
769 """A object that represent a single pull operation
771
770
772 It purpose is to carry pull related state and very common operation.
771 It purpose is to carry pull related state and very common operation.
773
772
774 A new should be created at the beginning of each pull and discarded
773 A new should be created at the beginning of each pull and discarded
775 afterward.
774 afterward.
776 """
775 """
777
776
778 def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
777 def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
779 # repo we pull into
778 # repo we pull into
780 self.repo = repo
779 self.repo = repo
781 # repo we pull from
780 # repo we pull from
782 self.remote = remote
781 self.remote = remote
783 # revision we try to pull (None is "all")
782 # revision we try to pull (None is "all")
784 self.heads = heads
783 self.heads = heads
785 # bookmark pulled explicitly
784 # bookmark pulled explicitly
786 self.explicitbookmarks = bookmarks
785 self.explicitbookmarks = bookmarks
787 # do we force pull?
786 # do we force pull?
788 self.force = force
787 self.force = force
789 # transaction manager
788 # transaction manager
790 self.trmanager = None
789 self.trmanager = None
791 # set of common changeset between local and remote before pull
790 # set of common changeset between local and remote before pull
792 self.common = None
791 self.common = None
793 # set of pulled head
792 # set of pulled head
794 self.rheads = None
793 self.rheads = None
795 # list of missing changeset to fetch remotely
794 # list of missing changeset to fetch remotely
796 self.fetch = None
795 self.fetch = None
797 # remote bookmarks data
796 # remote bookmarks data
798 self.remotebookmarks = None
797 self.remotebookmarks = None
799 # result of changegroup pulling (used as return code by pull)
798 # result of changegroup pulling (used as return code by pull)
800 self.cgresult = None
799 self.cgresult = None
801 # list of step already done
800 # list of step already done
802 self.stepsdone = set()
801 self.stepsdone = set()
803
802
804 @util.propertycache
803 @util.propertycache
805 def pulledsubset(self):
804 def pulledsubset(self):
806 """heads of the set of changeset target by the pull"""
805 """heads of the set of changeset target by the pull"""
807 # compute target subset
806 # compute target subset
808 if self.heads is None:
807 if self.heads is None:
809 # We pulled every thing possible
808 # We pulled every thing possible
810 # sync on everything common
809 # sync on everything common
811 c = set(self.common)
810 c = set(self.common)
812 ret = list(self.common)
811 ret = list(self.common)
813 for n in self.rheads:
812 for n in self.rheads:
814 if n not in c:
813 if n not in c:
815 ret.append(n)
814 ret.append(n)
816 return ret
815 return ret
817 else:
816 else:
818 # We pulled a specific subset
817 # We pulled a specific subset
819 # sync on this subset
818 # sync on this subset
820 return self.heads
819 return self.heads
821
820
822 def gettransaction(self):
821 def gettransaction(self):
823 # deprecated; talk to trmanager directly
822 # deprecated; talk to trmanager directly
824 return self.trmanager.transaction()
823 return self.trmanager.transaction()
825
824
826 class transactionmanager(object):
825 class transactionmanager(object):
827 """An object to manage the life cycle of a transaction
826 """An object to manage the life cycle of a transaction
828
827
829 It creates the transaction on demand and calls the appropriate hooks when
828 It creates the transaction on demand and calls the appropriate hooks when
830 closing the transaction."""
829 closing the transaction."""
831 def __init__(self, repo, source, url):
830 def __init__(self, repo, source, url):
832 self.repo = repo
831 self.repo = repo
833 self.source = source
832 self.source = source
834 self.url = url
833 self.url = url
835 self._tr = None
834 self._tr = None
836
835
837 def transaction(self):
836 def transaction(self):
838 """Return an open transaction object, constructing if necessary"""
837 """Return an open transaction object, constructing if necessary"""
839 if not self._tr:
838 if not self._tr:
840 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
839 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
841 self._tr = self.repo.transaction(trname)
840 self._tr = self.repo.transaction(trname)
842 self._tr.hookargs['source'] = self.source
841 self._tr.hookargs['source'] = self.source
843 self._tr.hookargs['url'] = self.url
842 self._tr.hookargs['url'] = self.url
844 return self._tr
843 return self._tr
845
844
846 def close(self):
845 def close(self):
847 """close transaction if created"""
846 """close transaction if created"""
848 if self._tr is not None:
847 if self._tr is not None:
849 repo = self.repo
848 repo = self.repo
850 p = lambda: self._tr.writepending() and repo.root or ""
849 p = lambda: self._tr.writepending() and repo.root or ""
851 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
850 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
852 **self._tr.hookargs)
851 **self._tr.hookargs)
853 hookargs = dict(self._tr.hookargs)
852 hookargs = dict(self._tr.hookargs)
854 def runhooks():
853 def runhooks():
855 repo.hook('b2x-transactionclose', **hookargs)
854 repo.hook('b2x-transactionclose', **hookargs)
856 self._tr.addpostclose('b2x-hook-transactionclose',
855 self._tr.addpostclose('b2x-hook-transactionclose',
857 lambda tr: repo._afterlock(runhooks))
856 lambda tr: repo._afterlock(runhooks))
858 self._tr.close()
857 self._tr.close()
859
858
860 def release(self):
859 def release(self):
861 """release transaction if created"""
860 """release transaction if created"""
862 if self._tr is not None:
861 if self._tr is not None:
863 self._tr.release()
862 self._tr.release()
864
863
865 def pull(repo, remote, heads=None, force=False, bookmarks=()):
864 def pull(repo, remote, heads=None, force=False, bookmarks=()):
866 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
865 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
867 if pullop.remote.local():
866 if pullop.remote.local():
868 missing = set(pullop.remote.requirements) - pullop.repo.supported
867 missing = set(pullop.remote.requirements) - pullop.repo.supported
869 if missing:
868 if missing:
870 msg = _("required features are not"
869 msg = _("required features are not"
871 " supported in the destination:"
870 " supported in the destination:"
872 " %s") % (', '.join(sorted(missing)))
871 " %s") % (', '.join(sorted(missing)))
873 raise util.Abort(msg)
872 raise util.Abort(msg)
874
873
875 pullop.remotebookmarks = remote.listkeys('bookmarks')
874 pullop.remotebookmarks = remote.listkeys('bookmarks')
876 lock = pullop.repo.lock()
875 lock = pullop.repo.lock()
877 try:
876 try:
878 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
877 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
879 _pulldiscovery(pullop)
878 _pulldiscovery(pullop)
880 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
879 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
881 and pullop.remote.capable('bundle2-exp')):
880 and pullop.remote.capable('bundle2-exp')):
882 _pullbundle2(pullop)
881 _pullbundle2(pullop)
883 _pullchangeset(pullop)
882 _pullchangeset(pullop)
884 _pullphase(pullop)
883 _pullphase(pullop)
885 _pullbookmarks(pullop)
884 _pullbookmarks(pullop)
886 _pullobsolete(pullop)
885 _pullobsolete(pullop)
887 pullop.trmanager.close()
886 pullop.trmanager.close()
888 finally:
887 finally:
889 pullop.trmanager.release()
888 pullop.trmanager.release()
890 lock.release()
889 lock.release()
891
890
892 return pullop
891 return pullop
893
892
894 # list of steps to perform discovery before pull
893 # list of steps to perform discovery before pull
895 pulldiscoveryorder = []
894 pulldiscoveryorder = []
896
895
897 # Mapping between step name and function
896 # Mapping between step name and function
898 #
897 #
899 # This exists to help extensions wrap steps if necessary
898 # This exists to help extensions wrap steps if necessary
900 pulldiscoverymapping = {}
899 pulldiscoverymapping = {}
901
900
902 def pulldiscovery(stepname):
901 def pulldiscovery(stepname):
903 """decorator for function performing discovery before pull
902 """decorator for function performing discovery before pull
904
903
905 The function is added to the step -> function mapping and appended to the
904 The function is added to the step -> function mapping and appended to the
906 list of steps. Beware that decorated function will be added in order (this
905 list of steps. Beware that decorated function will be added in order (this
907 may matter).
906 may matter).
908
907
909 You can only use this decorator for a new step, if you want to wrap a step
908 You can only use this decorator for a new step, if you want to wrap a step
910 from an extension, change the pulldiscovery dictionary directly."""
909 from an extension, change the pulldiscovery dictionary directly."""
911 def dec(func):
910 def dec(func):
912 assert stepname not in pulldiscoverymapping
911 assert stepname not in pulldiscoverymapping
913 pulldiscoverymapping[stepname] = func
912 pulldiscoverymapping[stepname] = func
914 pulldiscoveryorder.append(stepname)
913 pulldiscoveryorder.append(stepname)
915 return func
914 return func
916 return dec
915 return dec
917
916
918 def _pulldiscovery(pullop):
917 def _pulldiscovery(pullop):
919 """Run all discovery steps"""
918 """Run all discovery steps"""
920 for stepname in pulldiscoveryorder:
919 for stepname in pulldiscoveryorder:
921 step = pulldiscoverymapping[stepname]
920 step = pulldiscoverymapping[stepname]
922 step(pullop)
921 step(pullop)
923
922
924 @pulldiscovery('changegroup')
923 @pulldiscovery('changegroup')
925 def _pulldiscoverychangegroup(pullop):
924 def _pulldiscoverychangegroup(pullop):
926 """discovery phase for the pull
925 """discovery phase for the pull
927
926
928 Current handle changeset discovery only, will change handle all discovery
927 Current handle changeset discovery only, will change handle all discovery
929 at some point."""
928 at some point."""
930 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
929 tmp = discovery.findcommonincoming(pullop.repo,
931 pullop.remote,
930 pullop.remote,
932 heads=pullop.heads,
931 heads=pullop.heads,
933 force=pullop.force)
932 force=pullop.force)
934 pullop.common, pullop.fetch, pullop.rheads = tmp
933 common, fetch, rheads = tmp
934 nm = pullop.repo.unfiltered().changelog.nodemap
935 if fetch and rheads:
936 # If a remote heads in filtered locally, lets drop it from the unknown
937 # remote heads and put in back in common.
938 #
939 # This is a hackish solution to catch most of "common but locally
940 # hidden situation". We do not performs discovery on unfiltered
941 # repository because it end up doing a pathological amount of round
942 # trip for w huge amount of changeset we do not care about.
943 #
944 # If a set of such "common but filtered" changeset exist on the server
945 # but are not including a remote heads, we'll not be able to detect it,
946 scommon = set(common)
947 filteredrheads = []
948 for n in rheads:
949 if n in nm and n not in scommon:
950 common.append(n)
951 else:
952 filteredrheads.append(n)
953 if not filteredrheads:
954 fetch = []
955 rheads = filteredrheads
956 pullop.common = common
957 pullop.fetch = fetch
958 pullop.rheads = rheads
935
959
936 def _pullbundle2(pullop):
960 def _pullbundle2(pullop):
937 """pull data using bundle2
961 """pull data using bundle2
938
962
939 For now, the only supported data are changegroup."""
963 For now, the only supported data are changegroup."""
940 remotecaps = bundle2.bundle2caps(pullop.remote)
964 remotecaps = bundle2.bundle2caps(pullop.remote)
941 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
965 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
942 # pulling changegroup
966 # pulling changegroup
943 pullop.stepsdone.add('changegroup')
967 pullop.stepsdone.add('changegroup')
944
968
945 kwargs['common'] = pullop.common
969 kwargs['common'] = pullop.common
946 kwargs['heads'] = pullop.heads or pullop.rheads
970 kwargs['heads'] = pullop.heads or pullop.rheads
947 kwargs['cg'] = pullop.fetch
971 kwargs['cg'] = pullop.fetch
948 if 'b2x:listkeys' in remotecaps:
972 if 'b2x:listkeys' in remotecaps:
949 kwargs['listkeys'] = ['phase', 'bookmarks']
973 kwargs['listkeys'] = ['phase', 'bookmarks']
950 if not pullop.fetch:
974 if not pullop.fetch:
951 pullop.repo.ui.status(_("no changes found\n"))
975 pullop.repo.ui.status(_("no changes found\n"))
952 pullop.cgresult = 0
976 pullop.cgresult = 0
953 else:
977 else:
954 if pullop.heads is None and list(pullop.common) == [nullid]:
978 if pullop.heads is None and list(pullop.common) == [nullid]:
955 pullop.repo.ui.status(_("requesting all changes\n"))
979 pullop.repo.ui.status(_("requesting all changes\n"))
956 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
980 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
957 remoteversions = bundle2.obsmarkersversion(remotecaps)
981 remoteversions = bundle2.obsmarkersversion(remotecaps)
958 if obsolete.commonversion(remoteversions) is not None:
982 if obsolete.commonversion(remoteversions) is not None:
959 kwargs['obsmarkers'] = True
983 kwargs['obsmarkers'] = True
960 pullop.stepsdone.add('obsmarkers')
984 pullop.stepsdone.add('obsmarkers')
961 _pullbundle2extraprepare(pullop, kwargs)
985 _pullbundle2extraprepare(pullop, kwargs)
962 if kwargs.keys() == ['format']:
986 if kwargs.keys() == ['format']:
963 return # nothing to pull
987 return # nothing to pull
964 bundle = pullop.remote.getbundle('pull', **kwargs)
988 bundle = pullop.remote.getbundle('pull', **kwargs)
965 try:
989 try:
966 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
990 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
967 except error.BundleValueError, exc:
991 except error.BundleValueError, exc:
968 raise util.Abort('missing support for %s' % exc)
992 raise util.Abort('missing support for %s' % exc)
969
993
970 if pullop.fetch:
994 if pullop.fetch:
971 changedheads = 0
995 changedheads = 0
972 pullop.cgresult = 1
996 pullop.cgresult = 1
973 for cg in op.records['changegroup']:
997 for cg in op.records['changegroup']:
974 ret = cg['return']
998 ret = cg['return']
975 # If any changegroup result is 0, return 0
999 # If any changegroup result is 0, return 0
976 if ret == 0:
1000 if ret == 0:
977 pullop.cgresult = 0
1001 pullop.cgresult = 0
978 break
1002 break
979 if ret < -1:
1003 if ret < -1:
980 changedheads += ret + 1
1004 changedheads += ret + 1
981 elif ret > 1:
1005 elif ret > 1:
982 changedheads += ret - 1
1006 changedheads += ret - 1
983 if changedheads > 0:
1007 if changedheads > 0:
984 pullop.cgresult = 1 + changedheads
1008 pullop.cgresult = 1 + changedheads
985 elif changedheads < 0:
1009 elif changedheads < 0:
986 pullop.cgresult = -1 + changedheads
1010 pullop.cgresult = -1 + changedheads
987
1011
988 # processing phases change
1012 # processing phases change
989 for namespace, value in op.records['listkeys']:
1013 for namespace, value in op.records['listkeys']:
990 if namespace == 'phases':
1014 if namespace == 'phases':
991 _pullapplyphases(pullop, value)
1015 _pullapplyphases(pullop, value)
992
1016
993 # processing bookmark update
1017 # processing bookmark update
994 for namespace, value in op.records['listkeys']:
1018 for namespace, value in op.records['listkeys']:
995 if namespace == 'bookmarks':
1019 if namespace == 'bookmarks':
996 pullop.remotebookmarks = value
1020 pullop.remotebookmarks = value
997 _pullbookmarks(pullop)
1021 _pullbookmarks(pullop)
998
1022
999 def _pullbundle2extraprepare(pullop, kwargs):
1023 def _pullbundle2extraprepare(pullop, kwargs):
1000 """hook function so that extensions can extend the getbundle call"""
1024 """hook function so that extensions can extend the getbundle call"""
1001 pass
1025 pass
1002
1026
1003 def _pullchangeset(pullop):
1027 def _pullchangeset(pullop):
1004 """pull changeset from unbundle into the local repo"""
1028 """pull changeset from unbundle into the local repo"""
1005 # We delay the open of the transaction as late as possible so we
1029 # We delay the open of the transaction as late as possible so we
1006 # don't open transaction for nothing or you break future useful
1030 # don't open transaction for nothing or you break future useful
1007 # rollback call
1031 # rollback call
1008 if 'changegroup' in pullop.stepsdone:
1032 if 'changegroup' in pullop.stepsdone:
1009 return
1033 return
1010 pullop.stepsdone.add('changegroup')
1034 pullop.stepsdone.add('changegroup')
1011 if not pullop.fetch:
1035 if not pullop.fetch:
1012 pullop.repo.ui.status(_("no changes found\n"))
1036 pullop.repo.ui.status(_("no changes found\n"))
1013 pullop.cgresult = 0
1037 pullop.cgresult = 0
1014 return
1038 return
1015 pullop.gettransaction()
1039 pullop.gettransaction()
1016 if pullop.heads is None and list(pullop.common) == [nullid]:
1040 if pullop.heads is None and list(pullop.common) == [nullid]:
1017 pullop.repo.ui.status(_("requesting all changes\n"))
1041 pullop.repo.ui.status(_("requesting all changes\n"))
1018 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1042 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1019 # issue1320, avoid a race if remote changed after discovery
1043 # issue1320, avoid a race if remote changed after discovery
1020 pullop.heads = pullop.rheads
1044 pullop.heads = pullop.rheads
1021
1045
1022 if pullop.remote.capable('getbundle'):
1046 if pullop.remote.capable('getbundle'):
1023 # TODO: get bundlecaps from remote
1047 # TODO: get bundlecaps from remote
1024 cg = pullop.remote.getbundle('pull', common=pullop.common,
1048 cg = pullop.remote.getbundle('pull', common=pullop.common,
1025 heads=pullop.heads or pullop.rheads)
1049 heads=pullop.heads or pullop.rheads)
1026 elif pullop.heads is None:
1050 elif pullop.heads is None:
1027 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1051 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1028 elif not pullop.remote.capable('changegroupsubset'):
1052 elif not pullop.remote.capable('changegroupsubset'):
1029 raise util.Abort(_("partial pull cannot be done because "
1053 raise util.Abort(_("partial pull cannot be done because "
1030 "other repository doesn't support "
1054 "other repository doesn't support "
1031 "changegroupsubset."))
1055 "changegroupsubset."))
1032 else:
1056 else:
1033 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1057 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1034 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1058 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1035 pullop.remote.url())
1059 pullop.remote.url())
1036
1060
1037 def _pullphase(pullop):
1061 def _pullphase(pullop):
1038 # Get remote phases data from remote
1062 # Get remote phases data from remote
1039 if 'phases' in pullop.stepsdone:
1063 if 'phases' in pullop.stepsdone:
1040 return
1064 return
1041 remotephases = pullop.remote.listkeys('phases')
1065 remotephases = pullop.remote.listkeys('phases')
1042 _pullapplyphases(pullop, remotephases)
1066 _pullapplyphases(pullop, remotephases)
1043
1067
1044 def _pullapplyphases(pullop, remotephases):
1068 def _pullapplyphases(pullop, remotephases):
1045 """apply phase movement from observed remote state"""
1069 """apply phase movement from observed remote state"""
1046 if 'phases' in pullop.stepsdone:
1070 if 'phases' in pullop.stepsdone:
1047 return
1071 return
1048 pullop.stepsdone.add('phases')
1072 pullop.stepsdone.add('phases')
1049 publishing = bool(remotephases.get('publishing', False))
1073 publishing = bool(remotephases.get('publishing', False))
1050 if remotephases and not publishing:
1074 if remotephases and not publishing:
1051 # remote is new and unpublishing
1075 # remote is new and unpublishing
1052 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1076 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1053 pullop.pulledsubset,
1077 pullop.pulledsubset,
1054 remotephases)
1078 remotephases)
1055 dheads = pullop.pulledsubset
1079 dheads = pullop.pulledsubset
1056 else:
1080 else:
1057 # Remote is old or publishing all common changesets
1081 # Remote is old or publishing all common changesets
1058 # should be seen as public
1082 # should be seen as public
1059 pheads = pullop.pulledsubset
1083 pheads = pullop.pulledsubset
1060 dheads = []
1084 dheads = []
1061 unfi = pullop.repo.unfiltered()
1085 unfi = pullop.repo.unfiltered()
1062 phase = unfi._phasecache.phase
1086 phase = unfi._phasecache.phase
1063 rev = unfi.changelog.nodemap.get
1087 rev = unfi.changelog.nodemap.get
1064 public = phases.public
1088 public = phases.public
1065 draft = phases.draft
1089 draft = phases.draft
1066
1090
1067 # exclude changesets already public locally and update the others
1091 # exclude changesets already public locally and update the others
1068 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1092 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1069 if pheads:
1093 if pheads:
1070 tr = pullop.gettransaction()
1094 tr = pullop.gettransaction()
1071 phases.advanceboundary(pullop.repo, tr, public, pheads)
1095 phases.advanceboundary(pullop.repo, tr, public, pheads)
1072
1096
1073 # exclude changesets already draft locally and update the others
1097 # exclude changesets already draft locally and update the others
1074 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1098 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1075 if dheads:
1099 if dheads:
1076 tr = pullop.gettransaction()
1100 tr = pullop.gettransaction()
1077 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1101 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1078
1102
1079 def _pullbookmarks(pullop):
1103 def _pullbookmarks(pullop):
1080 """process the remote bookmark information to update the local one"""
1104 """process the remote bookmark information to update the local one"""
1081 if 'bookmarks' in pullop.stepsdone:
1105 if 'bookmarks' in pullop.stepsdone:
1082 return
1106 return
1083 pullop.stepsdone.add('bookmarks')
1107 pullop.stepsdone.add('bookmarks')
1084 repo = pullop.repo
1108 repo = pullop.repo
1085 remotebookmarks = pullop.remotebookmarks
1109 remotebookmarks = pullop.remotebookmarks
1086 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1110 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1087 pullop.remote.url(),
1111 pullop.remote.url(),
1088 pullop.gettransaction,
1112 pullop.gettransaction,
1089 explicit=pullop.explicitbookmarks)
1113 explicit=pullop.explicitbookmarks)
1090
1114
1091 def _pullobsolete(pullop):
1115 def _pullobsolete(pullop):
1092 """utility function to pull obsolete markers from a remote
1116 """utility function to pull obsolete markers from a remote
1093
1117
1094 The `gettransaction` is function that return the pull transaction, creating
1118 The `gettransaction` is function that return the pull transaction, creating
1095 one if necessary. We return the transaction to inform the calling code that
1119 one if necessary. We return the transaction to inform the calling code that
1096 a new transaction have been created (when applicable).
1120 a new transaction have been created (when applicable).
1097
1121
1098 Exists mostly to allow overriding for experimentation purpose"""
1122 Exists mostly to allow overriding for experimentation purpose"""
1099 if 'obsmarkers' in pullop.stepsdone:
1123 if 'obsmarkers' in pullop.stepsdone:
1100 return
1124 return
1101 pullop.stepsdone.add('obsmarkers')
1125 pullop.stepsdone.add('obsmarkers')
1102 tr = None
1126 tr = None
1103 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1127 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1104 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1128 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1105 remoteobs = pullop.remote.listkeys('obsolete')
1129 remoteobs = pullop.remote.listkeys('obsolete')
1106 if 'dump0' in remoteobs:
1130 if 'dump0' in remoteobs:
1107 tr = pullop.gettransaction()
1131 tr = pullop.gettransaction()
1108 for key in sorted(remoteobs, reverse=True):
1132 for key in sorted(remoteobs, reverse=True):
1109 if key.startswith('dump'):
1133 if key.startswith('dump'):
1110 data = base85.b85decode(remoteobs[key])
1134 data = base85.b85decode(remoteobs[key])
1111 pullop.repo.obsstore.mergemarkers(tr, data)
1135 pullop.repo.obsstore.mergemarkers(tr, data)
1112 pullop.repo.invalidatevolatilesets()
1136 pullop.repo.invalidatevolatilesets()
1113 return tr
1137 return tr
1114
1138
1115 def caps20to10(repo):
1139 def caps20to10(repo):
1116 """return a set with appropriate options to use bundle20 during getbundle"""
1140 """return a set with appropriate options to use bundle20 during getbundle"""
1117 caps = set(['HG2Y'])
1141 caps = set(['HG2Y'])
1118 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1142 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1119 caps.add('bundle2=' + urllib.quote(capsblob))
1143 caps.add('bundle2=' + urllib.quote(capsblob))
1120 return caps
1144 return caps
1121
1145
1122 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1146 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1123 getbundle2partsorder = []
1147 getbundle2partsorder = []
1124
1148
1125 # Mapping between step name and function
1149 # Mapping between step name and function
1126 #
1150 #
1127 # This exists to help extensions wrap steps if necessary
1151 # This exists to help extensions wrap steps if necessary
1128 getbundle2partsmapping = {}
1152 getbundle2partsmapping = {}
1129
1153
1130 def getbundle2partsgenerator(stepname):
1154 def getbundle2partsgenerator(stepname):
1131 """decorator for function generating bundle2 part for getbundle
1155 """decorator for function generating bundle2 part for getbundle
1132
1156
1133 The function is added to the step -> function mapping and appended to the
1157 The function is added to the step -> function mapping and appended to the
1134 list of steps. Beware that decorated functions will be added in order
1158 list of steps. Beware that decorated functions will be added in order
1135 (this may matter).
1159 (this may matter).
1136
1160
1137 You can only use this decorator for new steps, if you want to wrap a step
1161 You can only use this decorator for new steps, if you want to wrap a step
1138 from an extension, attack the getbundle2partsmapping dictionary directly."""
1162 from an extension, attack the getbundle2partsmapping dictionary directly."""
1139 def dec(func):
1163 def dec(func):
1140 assert stepname not in getbundle2partsmapping
1164 assert stepname not in getbundle2partsmapping
1141 getbundle2partsmapping[stepname] = func
1165 getbundle2partsmapping[stepname] = func
1142 getbundle2partsorder.append(stepname)
1166 getbundle2partsorder.append(stepname)
1143 return func
1167 return func
1144 return dec
1168 return dec
1145
1169
1146 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1170 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1147 **kwargs):
1171 **kwargs):
1148 """return a full bundle (with potentially multiple kind of parts)
1172 """return a full bundle (with potentially multiple kind of parts)
1149
1173
1150 Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
1174 Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
1151 passed. For now, the bundle can contain only changegroup, but this will
1175 passed. For now, the bundle can contain only changegroup, but this will
1152 changes when more part type will be available for bundle2.
1176 changes when more part type will be available for bundle2.
1153
1177
1154 This is different from changegroup.getchangegroup that only returns an HG10
1178 This is different from changegroup.getchangegroup that only returns an HG10
1155 changegroup bundle. They may eventually get reunited in the future when we
1179 changegroup bundle. They may eventually get reunited in the future when we
1156 have a clearer idea of the API we what to query different data.
1180 have a clearer idea of the API we what to query different data.
1157
1181
1158 The implementation is at a very early stage and will get massive rework
1182 The implementation is at a very early stage and will get massive rework
1159 when the API of bundle is refined.
1183 when the API of bundle is refined.
1160 """
1184 """
1161 # bundle10 case
1185 # bundle10 case
1162 if bundlecaps is None or 'HG2Y' not in bundlecaps:
1186 if bundlecaps is None or 'HG2Y' not in bundlecaps:
1163 if bundlecaps and not kwargs.get('cg', True):
1187 if bundlecaps and not kwargs.get('cg', True):
1164 raise ValueError(_('request for bundle10 must include changegroup'))
1188 raise ValueError(_('request for bundle10 must include changegroup'))
1165
1189
1166 if kwargs:
1190 if kwargs:
1167 raise ValueError(_('unsupported getbundle arguments: %s')
1191 raise ValueError(_('unsupported getbundle arguments: %s')
1168 % ', '.join(sorted(kwargs.keys())))
1192 % ', '.join(sorted(kwargs.keys())))
1169 return changegroup.getchangegroup(repo, source, heads=heads,
1193 return changegroup.getchangegroup(repo, source, heads=heads,
1170 common=common, bundlecaps=bundlecaps)
1194 common=common, bundlecaps=bundlecaps)
1171
1195
1172 # bundle20 case
1196 # bundle20 case
1173 b2caps = {}
1197 b2caps = {}
1174 for bcaps in bundlecaps:
1198 for bcaps in bundlecaps:
1175 if bcaps.startswith('bundle2='):
1199 if bcaps.startswith('bundle2='):
1176 blob = urllib.unquote(bcaps[len('bundle2='):])
1200 blob = urllib.unquote(bcaps[len('bundle2='):])
1177 b2caps.update(bundle2.decodecaps(blob))
1201 b2caps.update(bundle2.decodecaps(blob))
1178 bundler = bundle2.bundle20(repo.ui, b2caps)
1202 bundler = bundle2.bundle20(repo.ui, b2caps)
1179
1203
1180 kwargs['heads'] = heads
1204 kwargs['heads'] = heads
1181 kwargs['common'] = common
1205 kwargs['common'] = common
1182
1206
1183 for name in getbundle2partsorder:
1207 for name in getbundle2partsorder:
1184 func = getbundle2partsmapping[name]
1208 func = getbundle2partsmapping[name]
1185 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1209 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1186 **kwargs)
1210 **kwargs)
1187
1211
1188 return util.chunkbuffer(bundler.getchunks())
1212 return util.chunkbuffer(bundler.getchunks())
1189
1213
1190 @getbundle2partsgenerator('changegroup')
1214 @getbundle2partsgenerator('changegroup')
1191 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1215 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1192 b2caps=None, heads=None, common=None, **kwargs):
1216 b2caps=None, heads=None, common=None, **kwargs):
1193 """add a changegroup part to the requested bundle"""
1217 """add a changegroup part to the requested bundle"""
1194 cg = None
1218 cg = None
1195 if kwargs.get('cg', True):
1219 if kwargs.get('cg', True):
1196 # build changegroup bundle here.
1220 # build changegroup bundle here.
1197 version = None
1221 version = None
1198 cgversions = b2caps.get('b2x:changegroup')
1222 cgversions = b2caps.get('b2x:changegroup')
1199 if not cgversions: # 3.1 and 3.2 ship with an empty value
1223 if not cgversions: # 3.1 and 3.2 ship with an empty value
1200 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1224 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1201 common=common,
1225 common=common,
1202 bundlecaps=bundlecaps)
1226 bundlecaps=bundlecaps)
1203 else:
1227 else:
1204 cgversions = [v for v in cgversions if v in changegroup.packermap]
1228 cgversions = [v for v in cgversions if v in changegroup.packermap]
1205 if not cgversions:
1229 if not cgversions:
1206 raise ValueError(_('no common changegroup version'))
1230 raise ValueError(_('no common changegroup version'))
1207 version = max(cgversions)
1231 version = max(cgversions)
1208 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1232 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1209 common=common,
1233 common=common,
1210 bundlecaps=bundlecaps,
1234 bundlecaps=bundlecaps,
1211 version=version)
1235 version=version)
1212
1236
1213 if cg:
1237 if cg:
1214 part = bundler.newpart('b2x:changegroup', data=cg)
1238 part = bundler.newpart('b2x:changegroup', data=cg)
1215 if version is not None:
1239 if version is not None:
1216 part.addparam('version', version)
1240 part.addparam('version', version)
1217
1241
1218 @getbundle2partsgenerator('listkeys')
1242 @getbundle2partsgenerator('listkeys')
1219 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1243 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1220 b2caps=None, **kwargs):
1244 b2caps=None, **kwargs):
1221 """add parts containing listkeys namespaces to the requested bundle"""
1245 """add parts containing listkeys namespaces to the requested bundle"""
1222 listkeys = kwargs.get('listkeys', ())
1246 listkeys = kwargs.get('listkeys', ())
1223 for namespace in listkeys:
1247 for namespace in listkeys:
1224 part = bundler.newpart('b2x:listkeys')
1248 part = bundler.newpart('b2x:listkeys')
1225 part.addparam('namespace', namespace)
1249 part.addparam('namespace', namespace)
1226 keys = repo.listkeys(namespace).items()
1250 keys = repo.listkeys(namespace).items()
1227 part.data = pushkey.encodekeys(keys)
1251 part.data = pushkey.encodekeys(keys)
1228
1252
1229 @getbundle2partsgenerator('obsmarkers')
1253 @getbundle2partsgenerator('obsmarkers')
1230 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1254 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1231 b2caps=None, heads=None, **kwargs):
1255 b2caps=None, heads=None, **kwargs):
1232 """add an obsolescence markers part to the requested bundle"""
1256 """add an obsolescence markers part to the requested bundle"""
1233 if kwargs.get('obsmarkers', False):
1257 if kwargs.get('obsmarkers', False):
1234 if heads is None:
1258 if heads is None:
1235 heads = repo.heads()
1259 heads = repo.heads()
1236 subset = [c.node() for c in repo.set('::%ln', heads)]
1260 subset = [c.node() for c in repo.set('::%ln', heads)]
1237 markers = repo.obsstore.relevantmarkers(subset)
1261 markers = repo.obsstore.relevantmarkers(subset)
1238 buildobsmarkerspart(bundler, markers)
1262 buildobsmarkerspart(bundler, markers)
1239
1263
1240 def check_heads(repo, their_heads, context):
1264 def check_heads(repo, their_heads, context):
1241 """check if the heads of a repo have been modified
1265 """check if the heads of a repo have been modified
1242
1266
1243 Used by peer for unbundling.
1267 Used by peer for unbundling.
1244 """
1268 """
1245 heads = repo.heads()
1269 heads = repo.heads()
1246 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1270 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1247 if not (their_heads == ['force'] or their_heads == heads or
1271 if not (their_heads == ['force'] or their_heads == heads or
1248 their_heads == ['hashed', heads_hash]):
1272 their_heads == ['hashed', heads_hash]):
1249 # someone else committed/pushed/unbundled while we
1273 # someone else committed/pushed/unbundled while we
1250 # were transferring data
1274 # were transferring data
1251 raise error.PushRaced('repository changed while %s - '
1275 raise error.PushRaced('repository changed while %s - '
1252 'please try again' % context)
1276 'please try again' % context)
1253
1277
1254 def unbundle(repo, cg, heads, source, url):
1278 def unbundle(repo, cg, heads, source, url):
1255 """Apply a bundle to a repo.
1279 """Apply a bundle to a repo.
1256
1280
1257 this function makes sure the repo is locked during the application and have
1281 this function makes sure the repo is locked during the application and have
1258 mechanism to check that no push race occurred between the creation of the
1282 mechanism to check that no push race occurred between the creation of the
1259 bundle and its application.
1283 bundle and its application.
1260
1284
1261 If the push was raced as PushRaced exception is raised."""
1285 If the push was raced as PushRaced exception is raised."""
1262 r = 0
1286 r = 0
1263 # need a transaction when processing a bundle2 stream
1287 # need a transaction when processing a bundle2 stream
1264 tr = None
1288 tr = None
1265 lock = repo.lock()
1289 lock = repo.lock()
1266 try:
1290 try:
1267 check_heads(repo, heads, 'uploading changes')
1291 check_heads(repo, heads, 'uploading changes')
1268 # push can proceed
1292 # push can proceed
1269 if util.safehasattr(cg, 'params'):
1293 if util.safehasattr(cg, 'params'):
1270 try:
1294 try:
1271 tr = repo.transaction('unbundle')
1295 tr = repo.transaction('unbundle')
1272 tr.hookargs['source'] = source
1296 tr.hookargs['source'] = source
1273 tr.hookargs['url'] = url
1297 tr.hookargs['url'] = url
1274 tr.hookargs['bundle2-exp'] = '1'
1298 tr.hookargs['bundle2-exp'] = '1'
1275 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1299 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1276 p = lambda: tr.writepending() and repo.root or ""
1300 p = lambda: tr.writepending() and repo.root or ""
1277 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
1301 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
1278 **tr.hookargs)
1302 **tr.hookargs)
1279 hookargs = dict(tr.hookargs)
1303 hookargs = dict(tr.hookargs)
1280 def runhooks():
1304 def runhooks():
1281 repo.hook('b2x-transactionclose', **hookargs)
1305 repo.hook('b2x-transactionclose', **hookargs)
1282 tr.addpostclose('b2x-hook-transactionclose',
1306 tr.addpostclose('b2x-hook-transactionclose',
1283 lambda tr: repo._afterlock(runhooks))
1307 lambda tr: repo._afterlock(runhooks))
1284 tr.close()
1308 tr.close()
1285 except Exception, exc:
1309 except Exception, exc:
1286 exc.duringunbundle2 = True
1310 exc.duringunbundle2 = True
1287 raise
1311 raise
1288 else:
1312 else:
1289 r = changegroup.addchangegroup(repo, cg, source, url)
1313 r = changegroup.addchangegroup(repo, cg, source, url)
1290 finally:
1314 finally:
1291 if tr is not None:
1315 if tr is not None:
1292 tr.release()
1316 tr.release()
1293 lock.release()
1317 lock.release()
1294 return r
1318 return r
@@ -1,869 +1,873 b''
1 # wireproto.py - generic wire protocol support functions
1 # wireproto.py - generic wire protocol support functions
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import urllib, tempfile, os, sys
8 import urllib, tempfile, os, sys
9 from i18n import _
9 from i18n import _
10 from node import bin, hex
10 from node import bin, hex
11 import changegroup as changegroupmod, bundle2, pushkey as pushkeymod
11 import changegroup as changegroupmod, bundle2, pushkey as pushkeymod
12 import peer, error, encoding, util, store, exchange
12 import peer, error, encoding, util, store, exchange
13
13
14
14
15 class abstractserverproto(object):
15 class abstractserverproto(object):
16 """abstract class that summarizes the protocol API
16 """abstract class that summarizes the protocol API
17
17
18 Used as reference and documentation.
18 Used as reference and documentation.
19 """
19 """
20
20
21 def getargs(self, args):
21 def getargs(self, args):
22 """return the value for arguments in <args>
22 """return the value for arguments in <args>
23
23
24 returns a list of values (same order as <args>)"""
24 returns a list of values (same order as <args>)"""
25 raise NotImplementedError()
25 raise NotImplementedError()
26
26
27 def getfile(self, fp):
27 def getfile(self, fp):
28 """write the whole content of a file into a file like object
28 """write the whole content of a file into a file like object
29
29
30 The file is in the form::
30 The file is in the form::
31
31
32 (<chunk-size>\n<chunk>)+0\n
32 (<chunk-size>\n<chunk>)+0\n
33
33
34 chunk size is the ascii version of the int.
34 chunk size is the ascii version of the int.
35 """
35 """
36 raise NotImplementedError()
36 raise NotImplementedError()
37
37
38 def redirect(self):
38 def redirect(self):
39 """may setup interception for stdout and stderr
39 """may setup interception for stdout and stderr
40
40
41 See also the `restore` method."""
41 See also the `restore` method."""
42 raise NotImplementedError()
42 raise NotImplementedError()
43
43
44 # If the `redirect` function does install interception, the `restore`
44 # If the `redirect` function does install interception, the `restore`
45 # function MUST be defined. If interception is not used, this function
45 # function MUST be defined. If interception is not used, this function
46 # MUST NOT be defined.
46 # MUST NOT be defined.
47 #
47 #
48 # left commented here on purpose
48 # left commented here on purpose
49 #
49 #
50 #def restore(self):
50 #def restore(self):
51 # """reinstall previous stdout and stderr and return intercepted stdout
51 # """reinstall previous stdout and stderr and return intercepted stdout
52 # """
52 # """
53 # raise NotImplementedError()
53 # raise NotImplementedError()
54
54
55 def groupchunks(self, cg):
55 def groupchunks(self, cg):
56 """return 4096 chunks from a changegroup object
56 """return 4096 chunks from a changegroup object
57
57
58 Some protocols may have compressed the contents."""
58 Some protocols may have compressed the contents."""
59 raise NotImplementedError()
59 raise NotImplementedError()
60
60
61 # abstract batching support
61 # abstract batching support
62
62
63 class future(object):
63 class future(object):
64 '''placeholder for a value to be set later'''
64 '''placeholder for a value to be set later'''
65 def set(self, value):
65 def set(self, value):
66 if util.safehasattr(self, 'value'):
66 if util.safehasattr(self, 'value'):
67 raise error.RepoError("future is already set")
67 raise error.RepoError("future is already set")
68 self.value = value
68 self.value = value
69
69
70 class batcher(object):
70 class batcher(object):
71 '''base class for batches of commands submittable in a single request
71 '''base class for batches of commands submittable in a single request
72
72
73 All methods invoked on instances of this class are simply queued and
73 All methods invoked on instances of this class are simply queued and
74 return a a future for the result. Once you call submit(), all the queued
74 return a a future for the result. Once you call submit(), all the queued
75 calls are performed and the results set in their respective futures.
75 calls are performed and the results set in their respective futures.
76 '''
76 '''
77 def __init__(self):
77 def __init__(self):
78 self.calls = []
78 self.calls = []
79 def __getattr__(self, name):
79 def __getattr__(self, name):
80 def call(*args, **opts):
80 def call(*args, **opts):
81 resref = future()
81 resref = future()
82 self.calls.append((name, args, opts, resref,))
82 self.calls.append((name, args, opts, resref,))
83 return resref
83 return resref
84 return call
84 return call
85 def submit(self):
85 def submit(self):
86 pass
86 pass
87
87
88 class localbatch(batcher):
88 class localbatch(batcher):
89 '''performs the queued calls directly'''
89 '''performs the queued calls directly'''
90 def __init__(self, local):
90 def __init__(self, local):
91 batcher.__init__(self)
91 batcher.__init__(self)
92 self.local = local
92 self.local = local
93 def submit(self):
93 def submit(self):
94 for name, args, opts, resref in self.calls:
94 for name, args, opts, resref in self.calls:
95 resref.set(getattr(self.local, name)(*args, **opts))
95 resref.set(getattr(self.local, name)(*args, **opts))
96
96
97 class remotebatch(batcher):
97 class remotebatch(batcher):
98 '''batches the queued calls; uses as few roundtrips as possible'''
98 '''batches the queued calls; uses as few roundtrips as possible'''
99 def __init__(self, remote):
99 def __init__(self, remote):
100 '''remote must support _submitbatch(encbatch) and
100 '''remote must support _submitbatch(encbatch) and
101 _submitone(op, encargs)'''
101 _submitone(op, encargs)'''
102 batcher.__init__(self)
102 batcher.__init__(self)
103 self.remote = remote
103 self.remote = remote
104 def submit(self):
104 def submit(self):
105 req, rsp = [], []
105 req, rsp = [], []
106 for name, args, opts, resref in self.calls:
106 for name, args, opts, resref in self.calls:
107 mtd = getattr(self.remote, name)
107 mtd = getattr(self.remote, name)
108 batchablefn = getattr(mtd, 'batchable', None)
108 batchablefn = getattr(mtd, 'batchable', None)
109 if batchablefn is not None:
109 if batchablefn is not None:
110 batchable = batchablefn(mtd.im_self, *args, **opts)
110 batchable = batchablefn(mtd.im_self, *args, **opts)
111 encargsorres, encresref = batchable.next()
111 encargsorres, encresref = batchable.next()
112 if encresref:
112 if encresref:
113 req.append((name, encargsorres,))
113 req.append((name, encargsorres,))
114 rsp.append((batchable, encresref, resref,))
114 rsp.append((batchable, encresref, resref,))
115 else:
115 else:
116 resref.set(encargsorres)
116 resref.set(encargsorres)
117 else:
117 else:
118 if req:
118 if req:
119 self._submitreq(req, rsp)
119 self._submitreq(req, rsp)
120 req, rsp = [], []
120 req, rsp = [], []
121 resref.set(mtd(*args, **opts))
121 resref.set(mtd(*args, **opts))
122 if req:
122 if req:
123 self._submitreq(req, rsp)
123 self._submitreq(req, rsp)
124 def _submitreq(self, req, rsp):
124 def _submitreq(self, req, rsp):
125 encresults = self.remote._submitbatch(req)
125 encresults = self.remote._submitbatch(req)
126 for encres, r in zip(encresults, rsp):
126 for encres, r in zip(encresults, rsp):
127 batchable, encresref, resref = r
127 batchable, encresref, resref = r
128 encresref.set(encres)
128 encresref.set(encres)
129 resref.set(batchable.next())
129 resref.set(batchable.next())
130
130
131 def batchable(f):
131 def batchable(f):
132 '''annotation for batchable methods
132 '''annotation for batchable methods
133
133
134 Such methods must implement a coroutine as follows:
134 Such methods must implement a coroutine as follows:
135
135
136 @batchable
136 @batchable
137 def sample(self, one, two=None):
137 def sample(self, one, two=None):
138 # Handle locally computable results first:
138 # Handle locally computable results first:
139 if not one:
139 if not one:
140 yield "a local result", None
140 yield "a local result", None
141 # Build list of encoded arguments suitable for your wire protocol:
141 # Build list of encoded arguments suitable for your wire protocol:
142 encargs = [('one', encode(one),), ('two', encode(two),)]
142 encargs = [('one', encode(one),), ('two', encode(two),)]
143 # Create future for injection of encoded result:
143 # Create future for injection of encoded result:
144 encresref = future()
144 encresref = future()
145 # Return encoded arguments and future:
145 # Return encoded arguments and future:
146 yield encargs, encresref
146 yield encargs, encresref
147 # Assuming the future to be filled with the result from the batched
147 # Assuming the future to be filled with the result from the batched
148 # request now. Decode it:
148 # request now. Decode it:
149 yield decode(encresref.value)
149 yield decode(encresref.value)
150
150
151 The decorator returns a function which wraps this coroutine as a plain
151 The decorator returns a function which wraps this coroutine as a plain
152 method, but adds the original method as an attribute called "batchable",
152 method, but adds the original method as an attribute called "batchable",
153 which is used by remotebatch to split the call into separate encoding and
153 which is used by remotebatch to split the call into separate encoding and
154 decoding phases.
154 decoding phases.
155 '''
155 '''
156 def plain(*args, **opts):
156 def plain(*args, **opts):
157 batchable = f(*args, **opts)
157 batchable = f(*args, **opts)
158 encargsorres, encresref = batchable.next()
158 encargsorres, encresref = batchable.next()
159 if not encresref:
159 if not encresref:
160 return encargsorres # a local result in this case
160 return encargsorres # a local result in this case
161 self = args[0]
161 self = args[0]
162 encresref.set(self._submitone(f.func_name, encargsorres))
162 encresref.set(self._submitone(f.func_name, encargsorres))
163 return batchable.next()
163 return batchable.next()
164 setattr(plain, 'batchable', f)
164 setattr(plain, 'batchable', f)
165 return plain
165 return plain
166
166
167 # list of nodes encoding / decoding
167 # list of nodes encoding / decoding
168
168
169 def decodelist(l, sep=' '):
169 def decodelist(l, sep=' '):
170 if l:
170 if l:
171 return map(bin, l.split(sep))
171 return map(bin, l.split(sep))
172 return []
172 return []
173
173
174 def encodelist(l, sep=' '):
174 def encodelist(l, sep=' '):
175 try:
175 return sep.join(map(hex, l))
176 return sep.join(map(hex, l))
177 except TypeError:
178 print l
179 raise
176
180
177 # batched call argument encoding
181 # batched call argument encoding
178
182
179 def escapearg(plain):
183 def escapearg(plain):
180 return (plain
184 return (plain
181 .replace(':', '::')
185 .replace(':', '::')
182 .replace(',', ':,')
186 .replace(',', ':,')
183 .replace(';', ':;')
187 .replace(';', ':;')
184 .replace('=', ':='))
188 .replace('=', ':='))
185
189
186 def unescapearg(escaped):
190 def unescapearg(escaped):
187 return (escaped
191 return (escaped
188 .replace(':=', '=')
192 .replace(':=', '=')
189 .replace(':;', ';')
193 .replace(':;', ';')
190 .replace(':,', ',')
194 .replace(':,', ',')
191 .replace('::', ':'))
195 .replace('::', ':'))
192
196
193 # mapping of options accepted by getbundle and their types
197 # mapping of options accepted by getbundle and their types
194 #
198 #
195 # Meant to be extended by extensions. It is extensions responsibility to ensure
199 # Meant to be extended by extensions. It is extensions responsibility to ensure
196 # such options are properly processed in exchange.getbundle.
200 # such options are properly processed in exchange.getbundle.
197 #
201 #
198 # supported types are:
202 # supported types are:
199 #
203 #
200 # :nodes: list of binary nodes
204 # :nodes: list of binary nodes
201 # :csv: list of comma-separated values
205 # :csv: list of comma-separated values
202 # :plain: string with no transformation needed.
206 # :plain: string with no transformation needed.
203 gboptsmap = {'heads': 'nodes',
207 gboptsmap = {'heads': 'nodes',
204 'common': 'nodes',
208 'common': 'nodes',
205 'obsmarkers': 'boolean',
209 'obsmarkers': 'boolean',
206 'bundlecaps': 'csv',
210 'bundlecaps': 'csv',
207 'listkeys': 'csv',
211 'listkeys': 'csv',
208 'cg': 'boolean'}
212 'cg': 'boolean'}
209
213
210 # client side
214 # client side
211
215
212 class wirepeer(peer.peerrepository):
216 class wirepeer(peer.peerrepository):
213
217
214 def batch(self):
218 def batch(self):
215 return remotebatch(self)
219 return remotebatch(self)
216 def _submitbatch(self, req):
220 def _submitbatch(self, req):
217 cmds = []
221 cmds = []
218 for op, argsdict in req:
222 for op, argsdict in req:
219 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
223 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
220 cmds.append('%s %s' % (op, args))
224 cmds.append('%s %s' % (op, args))
221 rsp = self._call("batch", cmds=';'.join(cmds))
225 rsp = self._call("batch", cmds=';'.join(cmds))
222 return rsp.split(';')
226 return rsp.split(';')
223 def _submitone(self, op, args):
227 def _submitone(self, op, args):
224 return self._call(op, **args)
228 return self._call(op, **args)
225
229
226 @batchable
230 @batchable
227 def lookup(self, key):
231 def lookup(self, key):
228 self.requirecap('lookup', _('look up remote revision'))
232 self.requirecap('lookup', _('look up remote revision'))
229 f = future()
233 f = future()
230 yield {'key': encoding.fromlocal(key)}, f
234 yield {'key': encoding.fromlocal(key)}, f
231 d = f.value
235 d = f.value
232 success, data = d[:-1].split(" ", 1)
236 success, data = d[:-1].split(" ", 1)
233 if int(success):
237 if int(success):
234 yield bin(data)
238 yield bin(data)
235 self._abort(error.RepoError(data))
239 self._abort(error.RepoError(data))
236
240
237 @batchable
241 @batchable
238 def heads(self):
242 def heads(self):
239 f = future()
243 f = future()
240 yield {}, f
244 yield {}, f
241 d = f.value
245 d = f.value
242 try:
246 try:
243 yield decodelist(d[:-1])
247 yield decodelist(d[:-1])
244 except ValueError:
248 except ValueError:
245 self._abort(error.ResponseError(_("unexpected response:"), d))
249 self._abort(error.ResponseError(_("unexpected response:"), d))
246
250
247 @batchable
251 @batchable
248 def known(self, nodes):
252 def known(self, nodes):
249 f = future()
253 f = future()
250 yield {'nodes': encodelist(nodes)}, f
254 yield {'nodes': encodelist(nodes)}, f
251 d = f.value
255 d = f.value
252 try:
256 try:
253 yield [bool(int(b)) for b in d]
257 yield [bool(int(b)) for b in d]
254 except ValueError:
258 except ValueError:
255 self._abort(error.ResponseError(_("unexpected response:"), d))
259 self._abort(error.ResponseError(_("unexpected response:"), d))
256
260
257 @batchable
261 @batchable
258 def branchmap(self):
262 def branchmap(self):
259 f = future()
263 f = future()
260 yield {}, f
264 yield {}, f
261 d = f.value
265 d = f.value
262 try:
266 try:
263 branchmap = {}
267 branchmap = {}
264 for branchpart in d.splitlines():
268 for branchpart in d.splitlines():
265 branchname, branchheads = branchpart.split(' ', 1)
269 branchname, branchheads = branchpart.split(' ', 1)
266 branchname = encoding.tolocal(urllib.unquote(branchname))
270 branchname = encoding.tolocal(urllib.unquote(branchname))
267 branchheads = decodelist(branchheads)
271 branchheads = decodelist(branchheads)
268 branchmap[branchname] = branchheads
272 branchmap[branchname] = branchheads
269 yield branchmap
273 yield branchmap
270 except TypeError:
274 except TypeError:
271 self._abort(error.ResponseError(_("unexpected response:"), d))
275 self._abort(error.ResponseError(_("unexpected response:"), d))
272
276
273 def branches(self, nodes):
277 def branches(self, nodes):
274 n = encodelist(nodes)
278 n = encodelist(nodes)
275 d = self._call("branches", nodes=n)
279 d = self._call("branches", nodes=n)
276 try:
280 try:
277 br = [tuple(decodelist(b)) for b in d.splitlines()]
281 br = [tuple(decodelist(b)) for b in d.splitlines()]
278 return br
282 return br
279 except ValueError:
283 except ValueError:
280 self._abort(error.ResponseError(_("unexpected response:"), d))
284 self._abort(error.ResponseError(_("unexpected response:"), d))
281
285
282 def between(self, pairs):
286 def between(self, pairs):
283 batch = 8 # avoid giant requests
287 batch = 8 # avoid giant requests
284 r = []
288 r = []
285 for i in xrange(0, len(pairs), batch):
289 for i in xrange(0, len(pairs), batch):
286 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
290 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
287 d = self._call("between", pairs=n)
291 d = self._call("between", pairs=n)
288 try:
292 try:
289 r.extend(l and decodelist(l) or [] for l in d.splitlines())
293 r.extend(l and decodelist(l) or [] for l in d.splitlines())
290 except ValueError:
294 except ValueError:
291 self._abort(error.ResponseError(_("unexpected response:"), d))
295 self._abort(error.ResponseError(_("unexpected response:"), d))
292 return r
296 return r
293
297
294 @batchable
298 @batchable
295 def pushkey(self, namespace, key, old, new):
299 def pushkey(self, namespace, key, old, new):
296 if not self.capable('pushkey'):
300 if not self.capable('pushkey'):
297 yield False, None
301 yield False, None
298 f = future()
302 f = future()
299 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
303 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
300 yield {'namespace': encoding.fromlocal(namespace),
304 yield {'namespace': encoding.fromlocal(namespace),
301 'key': encoding.fromlocal(key),
305 'key': encoding.fromlocal(key),
302 'old': encoding.fromlocal(old),
306 'old': encoding.fromlocal(old),
303 'new': encoding.fromlocal(new)}, f
307 'new': encoding.fromlocal(new)}, f
304 d = f.value
308 d = f.value
305 d, output = d.split('\n', 1)
309 d, output = d.split('\n', 1)
306 try:
310 try:
307 d = bool(int(d))
311 d = bool(int(d))
308 except ValueError:
312 except ValueError:
309 raise error.ResponseError(
313 raise error.ResponseError(
310 _('push failed (unexpected response):'), d)
314 _('push failed (unexpected response):'), d)
311 for l in output.splitlines(True):
315 for l in output.splitlines(True):
312 self.ui.status(_('remote: '), l)
316 self.ui.status(_('remote: '), l)
313 yield d
317 yield d
314
318
315 @batchable
319 @batchable
316 def listkeys(self, namespace):
320 def listkeys(self, namespace):
317 if not self.capable('pushkey'):
321 if not self.capable('pushkey'):
318 yield {}, None
322 yield {}, None
319 f = future()
323 f = future()
320 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
324 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
321 yield {'namespace': encoding.fromlocal(namespace)}, f
325 yield {'namespace': encoding.fromlocal(namespace)}, f
322 d = f.value
326 d = f.value
323 yield pushkeymod.decodekeys(d)
327 yield pushkeymod.decodekeys(d)
324
328
325 def stream_out(self):
329 def stream_out(self):
326 return self._callstream('stream_out')
330 return self._callstream('stream_out')
327
331
328 def changegroup(self, nodes, kind):
332 def changegroup(self, nodes, kind):
329 n = encodelist(nodes)
333 n = encodelist(nodes)
330 f = self._callcompressable("changegroup", roots=n)
334 f = self._callcompressable("changegroup", roots=n)
331 return changegroupmod.cg1unpacker(f, 'UN')
335 return changegroupmod.cg1unpacker(f, 'UN')
332
336
333 def changegroupsubset(self, bases, heads, kind):
337 def changegroupsubset(self, bases, heads, kind):
334 self.requirecap('changegroupsubset', _('look up remote changes'))
338 self.requirecap('changegroupsubset', _('look up remote changes'))
335 bases = encodelist(bases)
339 bases = encodelist(bases)
336 heads = encodelist(heads)
340 heads = encodelist(heads)
337 f = self._callcompressable("changegroupsubset",
341 f = self._callcompressable("changegroupsubset",
338 bases=bases, heads=heads)
342 bases=bases, heads=heads)
339 return changegroupmod.cg1unpacker(f, 'UN')
343 return changegroupmod.cg1unpacker(f, 'UN')
340
344
341 def getbundle(self, source, **kwargs):
345 def getbundle(self, source, **kwargs):
342 self.requirecap('getbundle', _('look up remote changes'))
346 self.requirecap('getbundle', _('look up remote changes'))
343 opts = {}
347 opts = {}
344 for key, value in kwargs.iteritems():
348 for key, value in kwargs.iteritems():
345 if value is None:
349 if value is None:
346 continue
350 continue
347 keytype = gboptsmap.get(key)
351 keytype = gboptsmap.get(key)
348 if keytype is None:
352 if keytype is None:
349 assert False, 'unexpected'
353 assert False, 'unexpected'
350 elif keytype == 'nodes':
354 elif keytype == 'nodes':
351 value = encodelist(value)
355 value = encodelist(value)
352 elif keytype == 'csv':
356 elif keytype == 'csv':
353 value = ','.join(value)
357 value = ','.join(value)
354 elif keytype == 'boolean':
358 elif keytype == 'boolean':
355 value = '%i' % bool(value)
359 value = '%i' % bool(value)
356 elif keytype != 'plain':
360 elif keytype != 'plain':
357 raise KeyError('unknown getbundle option type %s'
361 raise KeyError('unknown getbundle option type %s'
358 % keytype)
362 % keytype)
359 opts[key] = value
363 opts[key] = value
360 f = self._callcompressable("getbundle", **opts)
364 f = self._callcompressable("getbundle", **opts)
361 bundlecaps = kwargs.get('bundlecaps')
365 bundlecaps = kwargs.get('bundlecaps')
362 if bundlecaps is not None and 'HG2Y' in bundlecaps:
366 if bundlecaps is not None and 'HG2Y' in bundlecaps:
363 return bundle2.unbundle20(self.ui, f)
367 return bundle2.unbundle20(self.ui, f)
364 else:
368 else:
365 return changegroupmod.cg1unpacker(f, 'UN')
369 return changegroupmod.cg1unpacker(f, 'UN')
366
370
367 def unbundle(self, cg, heads, source):
371 def unbundle(self, cg, heads, source):
368 '''Send cg (a readable file-like object representing the
372 '''Send cg (a readable file-like object representing the
369 changegroup to push, typically a chunkbuffer object) to the
373 changegroup to push, typically a chunkbuffer object) to the
370 remote server as a bundle.
374 remote server as a bundle.
371
375
372 When pushing a bundle10 stream, return an integer indicating the
376 When pushing a bundle10 stream, return an integer indicating the
373 result of the push (see localrepository.addchangegroup()).
377 result of the push (see localrepository.addchangegroup()).
374
378
375 When pushing a bundle20 stream, return a bundle20 stream.'''
379 When pushing a bundle20 stream, return a bundle20 stream.'''
376
380
377 if heads != ['force'] and self.capable('unbundlehash'):
381 if heads != ['force'] and self.capable('unbundlehash'):
378 heads = encodelist(['hashed',
382 heads = encodelist(['hashed',
379 util.sha1(''.join(sorted(heads))).digest()])
383 util.sha1(''.join(sorted(heads))).digest()])
380 else:
384 else:
381 heads = encodelist(heads)
385 heads = encodelist(heads)
382
386
383 if util.safehasattr(cg, 'deltaheader'):
387 if util.safehasattr(cg, 'deltaheader'):
384 # this a bundle10, do the old style call sequence
388 # this a bundle10, do the old style call sequence
385 ret, output = self._callpush("unbundle", cg, heads=heads)
389 ret, output = self._callpush("unbundle", cg, heads=heads)
386 if ret == "":
390 if ret == "":
387 raise error.ResponseError(
391 raise error.ResponseError(
388 _('push failed:'), output)
392 _('push failed:'), output)
389 try:
393 try:
390 ret = int(ret)
394 ret = int(ret)
391 except ValueError:
395 except ValueError:
392 raise error.ResponseError(
396 raise error.ResponseError(
393 _('push failed (unexpected response):'), ret)
397 _('push failed (unexpected response):'), ret)
394
398
395 for l in output.splitlines(True):
399 for l in output.splitlines(True):
396 self.ui.status(_('remote: '), l)
400 self.ui.status(_('remote: '), l)
397 else:
401 else:
398 # bundle2 push. Send a stream, fetch a stream.
402 # bundle2 push. Send a stream, fetch a stream.
399 stream = self._calltwowaystream('unbundle', cg, heads=heads)
403 stream = self._calltwowaystream('unbundle', cg, heads=heads)
400 ret = bundle2.unbundle20(self.ui, stream)
404 ret = bundle2.unbundle20(self.ui, stream)
401 return ret
405 return ret
402
406
403 def debugwireargs(self, one, two, three=None, four=None, five=None):
407 def debugwireargs(self, one, two, three=None, four=None, five=None):
404 # don't pass optional arguments left at their default value
408 # don't pass optional arguments left at their default value
405 opts = {}
409 opts = {}
406 if three is not None:
410 if three is not None:
407 opts['three'] = three
411 opts['three'] = three
408 if four is not None:
412 if four is not None:
409 opts['four'] = four
413 opts['four'] = four
410 return self._call('debugwireargs', one=one, two=two, **opts)
414 return self._call('debugwireargs', one=one, two=two, **opts)
411
415
412 def _call(self, cmd, **args):
416 def _call(self, cmd, **args):
413 """execute <cmd> on the server
417 """execute <cmd> on the server
414
418
415 The command is expected to return a simple string.
419 The command is expected to return a simple string.
416
420
417 returns the server reply as a string."""
421 returns the server reply as a string."""
418 raise NotImplementedError()
422 raise NotImplementedError()
419
423
420 def _callstream(self, cmd, **args):
424 def _callstream(self, cmd, **args):
421 """execute <cmd> on the server
425 """execute <cmd> on the server
422
426
423 The command is expected to return a stream.
427 The command is expected to return a stream.
424
428
425 returns the server reply as a file like object."""
429 returns the server reply as a file like object."""
426 raise NotImplementedError()
430 raise NotImplementedError()
427
431
428 def _callcompressable(self, cmd, **args):
432 def _callcompressable(self, cmd, **args):
429 """execute <cmd> on the server
433 """execute <cmd> on the server
430
434
431 The command is expected to return a stream.
435 The command is expected to return a stream.
432
436
433 The stream may have been compressed in some implementations. This
437 The stream may have been compressed in some implementations. This
434 function takes care of the decompression. This is the only difference
438 function takes care of the decompression. This is the only difference
435 with _callstream.
439 with _callstream.
436
440
437 returns the server reply as a file like object.
441 returns the server reply as a file like object.
438 """
442 """
439 raise NotImplementedError()
443 raise NotImplementedError()
440
444
441 def _callpush(self, cmd, fp, **args):
445 def _callpush(self, cmd, fp, **args):
442 """execute a <cmd> on server
446 """execute a <cmd> on server
443
447
444 The command is expected to be related to a push. Push has a special
448 The command is expected to be related to a push. Push has a special
445 return method.
449 return method.
446
450
447 returns the server reply as a (ret, output) tuple. ret is either
451 returns the server reply as a (ret, output) tuple. ret is either
448 empty (error) or a stringified int.
452 empty (error) or a stringified int.
449 """
453 """
450 raise NotImplementedError()
454 raise NotImplementedError()
451
455
452 def _calltwowaystream(self, cmd, fp, **args):
456 def _calltwowaystream(self, cmd, fp, **args):
453 """execute <cmd> on server
457 """execute <cmd> on server
454
458
455 The command will send a stream to the server and get a stream in reply.
459 The command will send a stream to the server and get a stream in reply.
456 """
460 """
457 raise NotImplementedError()
461 raise NotImplementedError()
458
462
459 def _abort(self, exception):
463 def _abort(self, exception):
460 """clearly abort the wire protocol connection and raise the exception
464 """clearly abort the wire protocol connection and raise the exception
461 """
465 """
462 raise NotImplementedError()
466 raise NotImplementedError()
463
467
464 # server side
468 # server side
465
469
466 # wire protocol command can either return a string or one of these classes.
470 # wire protocol command can either return a string or one of these classes.
467 class streamres(object):
471 class streamres(object):
468 """wireproto reply: binary stream
472 """wireproto reply: binary stream
469
473
470 The call was successful and the result is a stream.
474 The call was successful and the result is a stream.
471 Iterate on the `self.gen` attribute to retrieve chunks.
475 Iterate on the `self.gen` attribute to retrieve chunks.
472 """
476 """
473 def __init__(self, gen):
477 def __init__(self, gen):
474 self.gen = gen
478 self.gen = gen
475
479
476 class pushres(object):
480 class pushres(object):
477 """wireproto reply: success with simple integer return
481 """wireproto reply: success with simple integer return
478
482
479 The call was successful and returned an integer contained in `self.res`.
483 The call was successful and returned an integer contained in `self.res`.
480 """
484 """
481 def __init__(self, res):
485 def __init__(self, res):
482 self.res = res
486 self.res = res
483
487
484 class pusherr(object):
488 class pusherr(object):
485 """wireproto reply: failure
489 """wireproto reply: failure
486
490
487 The call failed. The `self.res` attribute contains the error message.
491 The call failed. The `self.res` attribute contains the error message.
488 """
492 """
489 def __init__(self, res):
493 def __init__(self, res):
490 self.res = res
494 self.res = res
491
495
492 class ooberror(object):
496 class ooberror(object):
493 """wireproto reply: failure of a batch of operation
497 """wireproto reply: failure of a batch of operation
494
498
495 Something failed during a batch call. The error message is stored in
499 Something failed during a batch call. The error message is stored in
496 `self.message`.
500 `self.message`.
497 """
501 """
498 def __init__(self, message):
502 def __init__(self, message):
499 self.message = message
503 self.message = message
500
504
501 def dispatch(repo, proto, command):
505 def dispatch(repo, proto, command):
502 repo = repo.filtered("served")
506 repo = repo.filtered("served")
503 func, spec = commands[command]
507 func, spec = commands[command]
504 args = proto.getargs(spec)
508 args = proto.getargs(spec)
505 return func(repo, proto, *args)
509 return func(repo, proto, *args)
506
510
507 def options(cmd, keys, others):
511 def options(cmd, keys, others):
508 opts = {}
512 opts = {}
509 for k in keys:
513 for k in keys:
510 if k in others:
514 if k in others:
511 opts[k] = others[k]
515 opts[k] = others[k]
512 del others[k]
516 del others[k]
513 if others:
517 if others:
514 sys.stderr.write("warning: %s ignored unexpected arguments %s\n"
518 sys.stderr.write("warning: %s ignored unexpected arguments %s\n"
515 % (cmd, ",".join(others)))
519 % (cmd, ",".join(others)))
516 return opts
520 return opts
517
521
518 # list of commands
522 # list of commands
519 commands = {}
523 commands = {}
520
524
521 def wireprotocommand(name, args=''):
525 def wireprotocommand(name, args=''):
522 """decorator for wire protocol command"""
526 """decorator for wire protocol command"""
523 def register(func):
527 def register(func):
524 commands[name] = (func, args)
528 commands[name] = (func, args)
525 return func
529 return func
526 return register
530 return register
527
531
@wireprotocommand('batch', 'cmds *')
def batch(repo, proto, cmds, others):
    """Run several wire protocol commands in a single round trip.

    `cmds` is a ';' separated list of "<op> <name>=<value>,..." entries.
    Each sub-command's result is escaped and the results are joined with
    ';'.  An out-of-band error from any sub-command short-circuits the
    batch and is returned directly.
    """
    repo = repo.filtered("served")
    results = []
    for pair in cmds.split(';'):
        op, args = pair.split(' ', 1)
        vals = {}
        for arg in args.split(','):
            if arg:
                name, value = arg.split('=')
                vals[name] = unescapearg(value)
        func, spec = commands[op]
        if spec:
            keys = spec.split()
            data = {}
            for k in keys:
                if k == '*':
                    # the '*' slot collects every argument not named
                    # explicitly in the command's spec
                    star = {}
                    for argname in vals.keys():
                        if argname not in keys:
                            star[argname] = vals[argname]
                    data['*'] = star
                else:
                    data[k] = vals[k]
            result = func(repo, proto, *[data[k] for k in keys])
        else:
            result = func(repo, proto)
        if isinstance(result, ooberror):
            # out-of-band error: abort the whole batch
            return result
        results.append(escapearg(result))
    return ';'.join(results)
559
563
@wireprotocommand('between', 'pairs')
def between(repo, proto, pairs):
    """Resolve '-' separated node pairs and return one encoded line per
    result of repo.between()."""
    pairs = [decodelist(p, '-') for p in pairs.split(" ")]
    return "".join(encodelist(b) + "\n" for b in repo.between(pairs))
567
571
@wireprotocommand('branchmap')
def branchmap(repo, proto):
    """Send the repository branch map, one 'name nodes' line per branch."""
    lines = []
    for branch, nodes in repo.branchmap().iteritems():
        # branch names may contain arbitrary bytes: quote them for the wire
        quoted = urllib.quote(encoding.fromlocal(branch))
        lines.append('%s %s' % (quoted, encodelist(nodes)))
    return '\n'.join(lines)
577
581
@wireprotocommand('branches', 'nodes')
def branches(repo, proto, nodes):
    """Return one encoded, newline-terminated line per entry produced by
    repo.branches() for the decoded nodes."""
    decoded = decodelist(nodes)
    return "".join(encodelist(b) + "\n" for b in repo.branches(decoded))
585
589
586
590
# Baseline capabilities advertised by every server; `_capabilities`
# extends this list based on repository and configuration specifics.
wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                 'known', 'getbundle', 'unbundlehash', 'batch']
589
593
def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap capabilities
    computation

    - returns a lists: easy to alter
    - change done here will be propagated to both `capabilities` and `hello`
      command without any other action needed.
    """
    # copy so callers never mutate the shared module-level list
    caps = list(wireprotocaps)
    if _allowstream(repo.ui):
        if repo.ui.configbool('server', 'preferuncompressed', False):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        if not requiredformats - set(('revlogv1',)):
            # plain revlogv1 store: the generic 'stream' cap suffices
            caps.append('stream')
        else:
            # otherwise advertise the exact store requirements so clients
            # can decide whether they understand the format
            caps.append('streamreqs=%s' % ','.join(requiredformats))
    if repo.ui.configbool('experimental', 'bundle2-exp', False):
        capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
        caps.append('bundle2-exp=' + urllib.quote(capsblob))
    caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
    caps.append('httpheader=1024')
    return caps
618
622
# Extensions that want to alter the advertised capability set should
# wrap `_capabilities`, not this command.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
    """Return the space separated capability string."""
    return ' '.join(_capabilities(repo, proto))
624
628
@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
    """Stream a changegroup of everything not reachable from `roots`."""
    nodes = decodelist(roots)
    bundle = changegroupmod.changegroup(repo, nodes, 'serve')
    return streamres(proto.groupchunks(bundle))
630
634
@wireprotocommand('changegroupsubset', 'bases heads')
def changegroupsubset(repo, proto, bases, heads):
    """Stream the changegroup between the decoded `bases` and `heads`."""
    basenodes = decodelist(bases)
    headnodes = decodelist(heads)
    bundle = changegroupmod.changegroupsubset(repo, basenodes, headnodes,
                                              'serve')
    return streamres(proto.groupchunks(bundle))
637
641
@wireprotocommand('debugwireargs', 'one two *')
def debugwireargs(repo, proto, one, two, others):
    """Echo arguments back to the client; exercises argument passing."""
    # only accept optional args from the known set
    opts = options('debugwireargs', ['three', 'four'], others)
    return repo.debugwireargs(one, two, **opts)
643
647
# List of options accepted by getbundle.
#
# Meant to be extended by extensions. It is the extension's responsibility
# to ensure such options are properly processed in exchange.getbundle.
#
# NOTE(review): getbundle() below consults `gboptsmap`, not this list —
# confirm which registry extensions are actually expected to extend.
gboptslist = ['heads', 'common', 'bundlecaps']
649
653
@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
    """Decode each getbundle option according to its declared type in
    `gboptsmap`, then stream the requested bundle."""
    opts = options('getbundle', gboptsmap.keys(), others)
    for k, v in opts.iteritems():
        keytype = gboptsmap[k]
        if keytype == 'nodes':
            opts[k] = decodelist(v)
        elif keytype == 'csv':
            opts[k] = set(v.split(','))
        elif keytype == 'boolean':
            opts[k] = bool(v)
        elif keytype != 'plain':
            # 'plain' values pass through untouched; anything else is a
            # programming error in the option declaration
            raise KeyError('unknown getbundle option type %s' % keytype)
    bundle = exchange.getbundle(repo, 'serve', **opts)
    return streamres(proto.groupchunks(bundle))
666
670
@wireprotocommand('heads')
def heads(repo, proto):
    """Return the encoded list of repository heads, newline terminated."""
    return encodelist(repo.heads()) + "\n"
671
675
@wireprotocommand('hello')
def hello(repo, proto):
    '''Describe the server in an RFC822-like "key: value" format.

    The only key currently emitted is "capabilities", whose value is a
    space separated list of capability tokens.
    '''
    return "capabilities: %s\n" % (capabilities(repo, proto))
682
686
@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
    """List the pushkey entries in `namespace`, encoded for the wire."""
    entries = repo.listkeys(encoding.tolocal(namespace)).items()
    return pushkeymod.encodekeys(entries)
687
691
688 @wireprotocommand('lookup', 'key')
692 @wireprotocommand('lookup', 'key')
689 def lookup(repo, proto, key):
693 def lookup(repo, proto, key):
690 try:
694 try:
691 k = encoding.tolocal(key)
695 k = encoding.tolocal(key)
692 c = repo[k]
696 c = repo[k]
693 r = c.hex()
697 r = c.hex()
694 success = 1
698 success = 1
695 except Exception, inst:
699 except Exception, inst:
696 r = str(inst)
700 r = str(inst)
697 success = 0
701 success = 0
698 return "%s %s\n" % (success, r)
702 return "%s %s\n" % (success, r)
699
703
@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
    """For each decoded node emit '1' if it is known locally, else '0'."""
    return ''.join("1" if b else "0"
                   for b in repo.known(decodelist(nodes)))
703
707
@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
    """Set a pushkey `key` in `namespace` from `old` to `new`.

    Returns "<int result>" (plus any captured output when the protocol
    supports restore()), newline terminated.
    """
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and new.encode('string-escape') != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    if util.safehasattr(proto, 'restore'):
        # protocol can capture output (presumably HTTP — TODO confirm):
        # redirect during the operation and ship the output with the result
        proto.redirect()

        try:
            ret = repo.pushkey(encoding.tolocal(namespace),
                               encoding.tolocal(key),
                               encoding.tolocal(old), new) or False
        except util.Abort:
            ret = False

        captured = proto.restore()

        return '%s\n%s' % (int(ret), captured)

    ret = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                       encoding.tolocal(old), new)
    return '%s\n' % int(ret)
735
739
736 def _allowstream(ui):
740 def _allowstream(ui):
737 return ui.configbool('server', 'uncompressed', True, untrusted=True)
741 return ui.configbool('server', 'uncompressed', True, untrusted=True)
738
742
739 def _walkstreamfiles(repo):
743 def _walkstreamfiles(repo):
740 # this is it's own function so extensions can override it
744 # this is it's own function so extensions can override it
741 return repo.store.walk()
745 return repo.store.walk()
742
746
@wireprotocommand('stream_out')
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. Client checks to see if it understands the format.

    The format is simple: the server writes out a line with the amount
    of files, then the total amount of bytes to be transferred (separated
    by a space). Then, for each file, the server first writes the filename
    and file size (separated by the null character), then the file contents.
    '''

    if not _allowstream(repo.ui):
        return '1\n'  # error: streaming disabled by configuration

    entries = []
    total_bytes = 0
    try:
        # get consistent snapshot of repo, lock during scan
        lock = repo.lock()
        try:
            repo.ui.debug('scanning\n')
            for name, ename, size in _walkstreamfiles(repo):
                if size:
                    entries.append((name, size))
                    total_bytes += size
        finally:
            lock.release()
    except error.LockError:
        return '2\n' # error: 2

    def streamer(repo, entries, total):
        '''stream out all metadata files in repository.'''
        yield '0\n' # success
        repo.ui.debug('%d files, %d bytes to transfer\n' %
                      (len(entries), total_bytes))
        yield '%d %d\n' % (len(entries), total_bytes)

        opener = repo.sopener
        oldaudit = opener.mustaudit
        debugflag = repo.ui.debugflag
        # disable path auditing while streaming; restored below
        opener.mustaudit = False

        try:
            for name, size in entries:
                if debugflag:
                    repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
                # partially encode name over the wire for backwards compat
                yield '%s\0%d\n' % (store.encodedir(name), size)
                if size <= 65536:
                    # small file: read it in a single call
                    fp = opener(name)
                    try:
                        data = fp.read(size)
                    finally:
                        fp.close()
                    yield data
                else:
                    for chunk in util.filechunkiter(opener(name),
                                                    limit=size):
                        yield chunk
        # replace with "finally:" when support for python 2.4 has been dropped
        except Exception:
            opener.mustaudit = oldaudit
            raise
        opener.mustaudit = oldaudit

    return streamres(streamer(repo, entries, total_bytes))
809
813
810 @wireprotocommand('unbundle', 'heads')
814 @wireprotocommand('unbundle', 'heads')
811 def unbundle(repo, proto, heads):
815 def unbundle(repo, proto, heads):
812 their_heads = decodelist(heads)
816 their_heads = decodelist(heads)
813
817
814 try:
818 try:
815 proto.redirect()
819 proto.redirect()
816
820
817 exchange.check_heads(repo, their_heads, 'preparing changes')
821 exchange.check_heads(repo, their_heads, 'preparing changes')
818
822
819 # write bundle data to temporary file because it can be big
823 # write bundle data to temporary file because it can be big
820 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
824 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
821 fp = os.fdopen(fd, 'wb+')
825 fp = os.fdopen(fd, 'wb+')
822 r = 0
826 r = 0
823 try:
827 try:
824 proto.getfile(fp)
828 proto.getfile(fp)
825 fp.seek(0)
829 fp.seek(0)
826 gen = exchange.readbundle(repo.ui, fp, None)
830 gen = exchange.readbundle(repo.ui, fp, None)
827 r = exchange.unbundle(repo, gen, their_heads, 'serve',
831 r = exchange.unbundle(repo, gen, their_heads, 'serve',
828 proto._client())
832 proto._client())
829 if util.safehasattr(r, 'addpart'):
833 if util.safehasattr(r, 'addpart'):
830 # The return looks streamable, we are in the bundle2 case and
834 # The return looks streamable, we are in the bundle2 case and
831 # should return a stream.
835 # should return a stream.
832 return streamres(r.getchunks())
836 return streamres(r.getchunks())
833 return pushres(r)
837 return pushres(r)
834
838
835 finally:
839 finally:
836 fp.close()
840 fp.close()
837 os.unlink(tempname)
841 os.unlink(tempname)
838 except error.BundleValueError, exc:
842 except error.BundleValueError, exc:
839 bundler = bundle2.bundle20(repo.ui)
843 bundler = bundle2.bundle20(repo.ui)
840 errpart = bundler.newpart('b2x:error:unsupportedcontent')
844 errpart = bundler.newpart('b2x:error:unsupportedcontent')
841 if exc.parttype is not None:
845 if exc.parttype is not None:
842 errpart.addparam('parttype', exc.parttype)
846 errpart.addparam('parttype', exc.parttype)
843 if exc.params:
847 if exc.params:
844 errpart.addparam('params', '\0'.join(exc.params))
848 errpart.addparam('params', '\0'.join(exc.params))
845 return streamres(bundler.getchunks())
849 return streamres(bundler.getchunks())
846 except util.Abort, inst:
850 except util.Abort, inst:
847 # The old code we moved used sys.stderr directly.
851 # The old code we moved used sys.stderr directly.
848 # We did not change it to minimise code change.
852 # We did not change it to minimise code change.
849 # This need to be moved to something proper.
853 # This need to be moved to something proper.
850 # Feel free to do it.
854 # Feel free to do it.
851 if getattr(inst, 'duringunbundle2', False):
855 if getattr(inst, 'duringunbundle2', False):
852 bundler = bundle2.bundle20(repo.ui)
856 bundler = bundle2.bundle20(repo.ui)
853 manargs = [('message', str(inst))]
857 manargs = [('message', str(inst))]
854 advargs = []
858 advargs = []
855 if inst.hint is not None:
859 if inst.hint is not None:
856 advargs.append(('hint', inst.hint))
860 advargs.append(('hint', inst.hint))
857 bundler.addpart(bundle2.bundlepart('b2x:error:abort',
861 bundler.addpart(bundle2.bundlepart('b2x:error:abort',
858 manargs, advargs))
862 manargs, advargs))
859 return streamres(bundler.getchunks())
863 return streamres(bundler.getchunks())
860 else:
864 else:
861 sys.stderr.write("abort: %s\n" % inst)
865 sys.stderr.write("abort: %s\n" % inst)
862 return pushres(0)
866 return pushres(0)
863 except error.PushRaced, exc:
867 except error.PushRaced, exc:
864 if getattr(exc, 'duringunbundle2', False):
868 if getattr(exc, 'duringunbundle2', False):
865 bundler = bundle2.bundle20(repo.ui)
869 bundler = bundle2.bundle20(repo.ui)
866 bundler.newpart('b2x:error:pushraced', [('message', str(exc))])
870 bundler.newpart('b2x:error:pushraced', [('message', str(exc))])
867 return streamres(bundler.getchunks())
871 return streamres(bundler.getchunks())
868 else:
872 else:
869 return pusherr(str(exc))
873 return pusherr(str(exc))
General Comments 0
You need to be logged in to leave comments. Login now