##// END OF EJS Templates
streamclone: refactor maybeperformstreamclone to take a pullop...
Gregory Szorc -
r26458:36279329 default
parent child Browse files
Show More
@@ -1,1481 +1,1479 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14 import streamclone
14 import streamclone
15 import tags
15 import tags
16
16
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the leading bytes of fh and return a matching unbundler.

    fname is only used in error messages ("stream" when absent); vfs, when
    given, is used to expand fname to a full path for those messages.
    Raises util.Abort for non-Mercurial data or unknown bundle versions.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        # A headerless changegroup starts with NUL bytes; wrap the stream so
        # the bytes already read are replayed, and treat it as plain HG10.
        if header.startswith('\0') and not header.startswith('HG'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[:2], header[2:]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 carries a 2-byte compression marker unless we already know it
        if compression is None:
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
42
42
def buildobsmarkerspart(bundler, markers):
    """Append an 'obsmarkers' part carrying <markers> to the bundler.

    Returns the newly created part, or None when markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(supported)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
57
57
58 def _canusebundle2(op):
58 def _canusebundle2(op):
59 """return true if a pull/push can use bundle2
59 """return true if a pull/push can use bundle2
60
60
61 Feel free to nuke this function when we drop the experimental option"""
61 Feel free to nuke this function when we drop the experimental option"""
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
63 and op.remote.capable('bundle2'))
63 and op.remote.capable('bundle2'))
64
64
65
65
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed by name
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager (set lazily once the local lock is held)
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # on success the pushed heads become common; on failure fall back to
        # the heads that were already common before the push
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # (success template, failure template) keyed by the kind of update
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
177
177
178
178
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing over the local filesystem: the destination must understand
        # every requirement of the source repo
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        # only a permission error is tolerated here; anything else is a bug
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction is only opened when the local repo could be
            # locked, so a failed lock silently skips push-response handling
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            # each _push* step checks pushop.stepsdone and is a no-op when a
            # previous step (e.g. bundle2) already handled its payload
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        # release in reverse acquisition order: transaction, lock, wlock
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
259
259
# Ordered list of discovery step names to perform before push.
# Populated as a side effect of the pushdiscovery() decorator.
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
267
267
def pushdiscovery(stepname):
    """Decorator registering a discovery step run before push.

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the ordered list of steps, so decoration order
    may matter.

    Only use this decorator for a brand new step; to wrap a step from an
    extension, change the pushdiscovery dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
283
283
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
289
289
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Discover the changesets that need to be pushed.

    Fills pushop.outgoing, pushop.remoteheads and pushop.incoming from the
    common/incoming/outgoing computation against the remote.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
302
302
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # restrict to public() only when the remote is non-publishing;
    # on a publishing server everything pushed becomes public anyway
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
351
351
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Select the obsolescence markers relevant to the outgoing heads.

    Does nothing unless marker exchange is enabled, the local repo has
    markers, and the remote advertises the 'obsolete' pushkey namespace.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
362
362
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and record what must be pushed.

    Fills pushop.outbookmarks with (name, remote-id, local-id) triples and
    sets pushop.bkresult to 2 when an explicitly requested bookmark exists
    on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # limit bookmark moves to the ancestry of the pushed revisions
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user explicitly asked to push; drained as they are seen
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # bookmarks that advanced locally
    for b, scid, dcid in advsrc:
        explicit.discard(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks added locally
    for b, scid, dcid in addsrc:
        explicit.discard(b)
        pushop.outbookmarks.append((b, '', scid))
    # overwritten bookmarks are only pushed when explicitly requested
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # remote-only bookmarks are only deleted when explicitly requested
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        explicit.discard(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
413
413
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing them.

    Returns False when there is nothing to push, True otherwise.  Unless
    the push is forced, aborts on obsolete/troubled outgoing heads and runs
    the standard new-head checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # a falsy obsstore means no obsolete markers at all, so the whole
        # scan can be skipped
        if unfi.obsstore:
            # messages hoisted here for 80 char limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # if any outgoing changeset is obsolete or unstable then at
            # least one outgoing head is too, so checking only the heads
            # is sufficient
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                if ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)

        # internal config: bookmarks.pushing
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads, pushop.newbranch,
                             bool(pushop.incoming), newbm)
    return True
450
450
# List of names of steps to perform for an outgoing bundle2, order matters.
# Populated as a side effect of the b2partsgenerator() decorator.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
458
458
459 def b2partsgenerator(stepname, idx=None):
459 def b2partsgenerator(stepname, idx=None):
460 """decorator for function generating bundle2 part
460 """decorator for function generating bundle2 part
461
461
462 The function is added to the step -> function mapping and appended to the
462 The function is added to the step -> function mapping and appended to the
463 list of steps. Beware that decorated functions will be added in order
463 list of steps. Beware that decorated functions will be added in order
464 (this may matter).
464 (this may matter).
465
465
466 You can only use this decorator for new steps, if you want to wrap a step
466 You can only use this decorator for new steps, if you want to wrap a step
467 from an extension, attack the b2partsgenmapping dictionary directly."""
467 from an extension, attack the b2partsgenmapping dictionary directly."""
468 def dec(func):
468 def dec(func):
469 assert stepname not in b2partsgenmapping
469 assert stepname not in b2partsgenmapping
470 b2partsgenmapping[stepname] = func
470 b2partsgenmapping[stepname] = func
471 if idx is None:
471 if idx is None:
472 b2partsgenorder.append(stepname)
472 b2partsgenorder.append(stepname)
473 else:
473 else:
474 b2partsgenorder.insert(idx, stepname)
474 b2partsgenorder.insert(idx, stepname)
475 return func
475 return func
476 return dec
476 return dec
477
477
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # a forced push skips the remote-heads race check entirely
    if not pushop.force:
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
485
485
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version both sides understand
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
527
527
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs so replies/failures can be mapped back to heads
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
568
568
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """push obsolescence markers as a bundle2 part, if the remote supports it"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    # no common obsmarker format version: leave the step for a fallback path
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)
580
580
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples for reply/failure handling
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
632
632
633
633
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise util.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise util.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        # dispatch pushkey failures to the callback registered for that part
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
676
676
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
725
725
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
781
781
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
798
798
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        # any falsy pushkey result means at least one marker was rejected
        if not all(rslts):
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
817
817
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
839
839
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
899
899
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction object; None until first use
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            # transaction name embeds the (password-stripped) source url
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
929
929
930 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
930 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
931 streamclonerequested=None):
931 streamclonerequested=None):
932 """Fetch repository data from a remote.
932 """Fetch repository data from a remote.
933
933
934 This is the main function used to retrieve data from a remote repository.
934 This is the main function used to retrieve data from a remote repository.
935
935
936 ``repo`` is the local repository to clone into.
936 ``repo`` is the local repository to clone into.
937 ``remote`` is a peer instance.
937 ``remote`` is a peer instance.
938 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
938 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
939 default) means to pull everything from the remote.
939 default) means to pull everything from the remote.
940 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
940 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
941 default, all remote bookmarks are pulled.
941 default, all remote bookmarks are pulled.
942 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
942 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
943 initialization.
943 initialization.
944 ``streamclonerequested`` is a boolean indicating whether a "streaming
944 ``streamclonerequested`` is a boolean indicating whether a "streaming
945 clone" is requested. A "streaming clone" is essentially a raw file copy
945 clone" is requested. A "streaming clone" is essentially a raw file copy
946 of revlogs from the server. This only works when the local repository is
946 of revlogs from the server. This only works when the local repository is
947 empty. The default value of ``None`` means to respect the server
947 empty. The default value of ``None`` means to respect the server
948 configuration for preferring stream clones.
948 configuration for preferring stream clones.
949
949
950 Returns the ``pulloperation`` created for this pull.
950 Returns the ``pulloperation`` created for this pull.
951 """
951 """
952 if opargs is None:
952 if opargs is None:
953 opargs = {}
953 opargs = {}
954 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
954 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
955 streamclonerequested=streamclonerequested, **opargs)
955 streamclonerequested=streamclonerequested, **opargs)
956 if pullop.remote.local():
956 if pullop.remote.local():
957 missing = set(pullop.remote.requirements) - pullop.repo.supported
957 missing = set(pullop.remote.requirements) - pullop.repo.supported
958 if missing:
958 if missing:
959 msg = _("required features are not"
959 msg = _("required features are not"
960 " supported in the destination:"
960 " supported in the destination:"
961 " %s") % (', '.join(sorted(missing)))
961 " %s") % (', '.join(sorted(missing)))
962 raise util.Abort(msg)
962 raise util.Abort(msg)
963
963
964 lock = pullop.repo.lock()
964 lock = pullop.repo.lock()
965 try:
965 try:
966 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
966 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
967 streamclone.maybeperformstreamclone(pullop.repo, pullop.remote,
967 streamclone.maybeperformstreamclone(pullop)
968 pullop.heads,
969 pullop.streamclonerequested)
970 _pulldiscovery(pullop)
968 _pulldiscovery(pullop)
971 if _canusebundle2(pullop):
969 if _canusebundle2(pullop):
972 _pullbundle2(pullop)
970 _pullbundle2(pullop)
973 _pullchangeset(pullop)
971 _pullchangeset(pullop)
974 _pullphase(pullop)
972 _pullphase(pullop)
975 _pullbookmarks(pullop)
973 _pullbookmarks(pullop)
976 _pullobsolete(pullop)
974 _pullobsolete(pullop)
977 pullop.trmanager.close()
975 pullop.trmanager.close()
978 finally:
976 finally:
979 pullop.trmanager.release()
977 pullop.trmanager.release()
980 lock.release()
978 lock.release()
981
979
982 return pullop
980 return pullop
983
981
984 # list of steps to perform discovery before pull
982 # list of steps to perform discovery before pull
985 pulldiscoveryorder = []
983 pulldiscoveryorder = []
986
984
987 # Mapping between step name and function
985 # Mapping between step name and function
988 #
986 #
989 # This exists to help extensions wrap steps if necessary
987 # This exists to help extensions wrap steps if necessary
990 pulldiscoverymapping = {}
988 pulldiscoverymapping = {}
991
989
992 def pulldiscovery(stepname):
990 def pulldiscovery(stepname):
993 """decorator for function performing discovery before pull
991 """decorator for function performing discovery before pull
994
992
995 The function is added to the step -> function mapping and appended to the
993 The function is added to the step -> function mapping and appended to the
996 list of steps. Beware that decorated function will be added in order (this
994 list of steps. Beware that decorated function will be added in order (this
997 may matter).
995 may matter).
998
996
999 You can only use this decorator for a new step, if you want to wrap a step
997 You can only use this decorator for a new step, if you want to wrap a step
1000 from an extension, change the pulldiscovery dictionary directly."""
998 from an extension, change the pulldiscovery dictionary directly."""
1001 def dec(func):
999 def dec(func):
1002 assert stepname not in pulldiscoverymapping
1000 assert stepname not in pulldiscoverymapping
1003 pulldiscoverymapping[stepname] = func
1001 pulldiscoverymapping[stepname] = func
1004 pulldiscoveryorder.append(stepname)
1002 pulldiscoveryorder.append(stepname)
1005 return func
1003 return func
1006 return dec
1004 return dec
1007
1005
1008 def _pulldiscovery(pullop):
1006 def _pulldiscovery(pullop):
1009 """Run all discovery steps"""
1007 """Run all discovery steps"""
1010 for stepname in pulldiscoveryorder:
1008 for stepname in pulldiscoveryorder:
1011 step = pulldiscoverymapping[stepname]
1009 step = pulldiscoverymapping[stepname]
1012 step(pullop)
1010 step(pullop)
1013
1011
1014 @pulldiscovery('b1:bookmarks')
1012 @pulldiscovery('b1:bookmarks')
1015 def _pullbookmarkbundle1(pullop):
1013 def _pullbookmarkbundle1(pullop):
1016 """fetch bookmark data in bundle1 case
1014 """fetch bookmark data in bundle1 case
1017
1015
1018 If not using bundle2, we have to fetch bookmarks before changeset
1016 If not using bundle2, we have to fetch bookmarks before changeset
1019 discovery to reduce the chance and impact of race conditions."""
1017 discovery to reduce the chance and impact of race conditions."""
1020 if pullop.remotebookmarks is not None:
1018 if pullop.remotebookmarks is not None:
1021 return
1019 return
1022 if (_canusebundle2(pullop)
1020 if (_canusebundle2(pullop)
1023 and 'listkeys' in bundle2.bundle2caps(pullop.remote)):
1021 and 'listkeys' in bundle2.bundle2caps(pullop.remote)):
1024 # all known bundle2 servers now support listkeys, but lets be nice with
1022 # all known bundle2 servers now support listkeys, but lets be nice with
1025 # new implementation.
1023 # new implementation.
1026 return
1024 return
1027 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1025 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1028
1026
1029
1027
1030 @pulldiscovery('changegroup')
1028 @pulldiscovery('changegroup')
1031 def _pulldiscoverychangegroup(pullop):
1029 def _pulldiscoverychangegroup(pullop):
1032 """discovery phase for the pull
1030 """discovery phase for the pull
1033
1031
1034 Current handle changeset discovery only, will change handle all discovery
1032 Current handle changeset discovery only, will change handle all discovery
1035 at some point."""
1033 at some point."""
1036 tmp = discovery.findcommonincoming(pullop.repo,
1034 tmp = discovery.findcommonincoming(pullop.repo,
1037 pullop.remote,
1035 pullop.remote,
1038 heads=pullop.heads,
1036 heads=pullop.heads,
1039 force=pullop.force)
1037 force=pullop.force)
1040 common, fetch, rheads = tmp
1038 common, fetch, rheads = tmp
1041 nm = pullop.repo.unfiltered().changelog.nodemap
1039 nm = pullop.repo.unfiltered().changelog.nodemap
1042 if fetch and rheads:
1040 if fetch and rheads:
1043 # If a remote heads in filtered locally, lets drop it from the unknown
1041 # If a remote heads in filtered locally, lets drop it from the unknown
1044 # remote heads and put in back in common.
1042 # remote heads and put in back in common.
1045 #
1043 #
1046 # This is a hackish solution to catch most of "common but locally
1044 # This is a hackish solution to catch most of "common but locally
1047 # hidden situation". We do not performs discovery on unfiltered
1045 # hidden situation". We do not performs discovery on unfiltered
1048 # repository because it end up doing a pathological amount of round
1046 # repository because it end up doing a pathological amount of round
1049 # trip for w huge amount of changeset we do not care about.
1047 # trip for w huge amount of changeset we do not care about.
1050 #
1048 #
1051 # If a set of such "common but filtered" changeset exist on the server
1049 # If a set of such "common but filtered" changeset exist on the server
1052 # but are not including a remote heads, we'll not be able to detect it,
1050 # but are not including a remote heads, we'll not be able to detect it,
1053 scommon = set(common)
1051 scommon = set(common)
1054 filteredrheads = []
1052 filteredrheads = []
1055 for n in rheads:
1053 for n in rheads:
1056 if n in nm:
1054 if n in nm:
1057 if n not in scommon:
1055 if n not in scommon:
1058 common.append(n)
1056 common.append(n)
1059 else:
1057 else:
1060 filteredrheads.append(n)
1058 filteredrheads.append(n)
1061 if not filteredrheads:
1059 if not filteredrheads:
1062 fetch = []
1060 fetch = []
1063 rheads = filteredrheads
1061 rheads = filteredrheads
1064 pullop.common = common
1062 pullop.common = common
1065 pullop.fetch = fetch
1063 pullop.fetch = fetch
1066 pullop.rheads = rheads
1064 pullop.rheads = rheads
1067
1065
1068 def _pullbundle2(pullop):
1066 def _pullbundle2(pullop):
1069 """pull data using bundle2
1067 """pull data using bundle2
1070
1068
1071 For now, the only supported data are changegroup."""
1069 For now, the only supported data are changegroup."""
1072 remotecaps = bundle2.bundle2caps(pullop.remote)
1070 remotecaps = bundle2.bundle2caps(pullop.remote)
1073 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1071 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1074 # pulling changegroup
1072 # pulling changegroup
1075 pullop.stepsdone.add('changegroup')
1073 pullop.stepsdone.add('changegroup')
1076
1074
1077 kwargs['common'] = pullop.common
1075 kwargs['common'] = pullop.common
1078 kwargs['heads'] = pullop.heads or pullop.rheads
1076 kwargs['heads'] = pullop.heads or pullop.rheads
1079 kwargs['cg'] = pullop.fetch
1077 kwargs['cg'] = pullop.fetch
1080 if 'listkeys' in remotecaps:
1078 if 'listkeys' in remotecaps:
1081 kwargs['listkeys'] = ['phase']
1079 kwargs['listkeys'] = ['phase']
1082 if pullop.remotebookmarks is None:
1080 if pullop.remotebookmarks is None:
1083 # make sure to always includes bookmark data when migrating
1081 # make sure to always includes bookmark data when migrating
1084 # `hg incoming --bundle` to using this function.
1082 # `hg incoming --bundle` to using this function.
1085 kwargs['listkeys'].append('bookmarks')
1083 kwargs['listkeys'].append('bookmarks')
1086 if not pullop.fetch:
1084 if not pullop.fetch:
1087 pullop.repo.ui.status(_("no changes found\n"))
1085 pullop.repo.ui.status(_("no changes found\n"))
1088 pullop.cgresult = 0
1086 pullop.cgresult = 0
1089 else:
1087 else:
1090 if pullop.heads is None and list(pullop.common) == [nullid]:
1088 if pullop.heads is None and list(pullop.common) == [nullid]:
1091 pullop.repo.ui.status(_("requesting all changes\n"))
1089 pullop.repo.ui.status(_("requesting all changes\n"))
1092 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1090 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1093 remoteversions = bundle2.obsmarkersversion(remotecaps)
1091 remoteversions = bundle2.obsmarkersversion(remotecaps)
1094 if obsolete.commonversion(remoteversions) is not None:
1092 if obsolete.commonversion(remoteversions) is not None:
1095 kwargs['obsmarkers'] = True
1093 kwargs['obsmarkers'] = True
1096 pullop.stepsdone.add('obsmarkers')
1094 pullop.stepsdone.add('obsmarkers')
1097 _pullbundle2extraprepare(pullop, kwargs)
1095 _pullbundle2extraprepare(pullop, kwargs)
1098 bundle = pullop.remote.getbundle('pull', **kwargs)
1096 bundle = pullop.remote.getbundle('pull', **kwargs)
1099 try:
1097 try:
1100 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1098 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1101 except error.BundleValueError as exc:
1099 except error.BundleValueError as exc:
1102 raise util.Abort('missing support for %s' % exc)
1100 raise util.Abort('missing support for %s' % exc)
1103
1101
1104 if pullop.fetch:
1102 if pullop.fetch:
1105 results = [cg['return'] for cg in op.records['changegroup']]
1103 results = [cg['return'] for cg in op.records['changegroup']]
1106 pullop.cgresult = changegroup.combineresults(results)
1104 pullop.cgresult = changegroup.combineresults(results)
1107
1105
1108 # processing phases change
1106 # processing phases change
1109 for namespace, value in op.records['listkeys']:
1107 for namespace, value in op.records['listkeys']:
1110 if namespace == 'phases':
1108 if namespace == 'phases':
1111 _pullapplyphases(pullop, value)
1109 _pullapplyphases(pullop, value)
1112
1110
1113 # processing bookmark update
1111 # processing bookmark update
1114 for namespace, value in op.records['listkeys']:
1112 for namespace, value in op.records['listkeys']:
1115 if namespace == 'bookmarks':
1113 if namespace == 'bookmarks':
1116 pullop.remotebookmarks = value
1114 pullop.remotebookmarks = value
1117
1115
1118 # bookmark data were either already there or pulled in the bundle
1116 # bookmark data were either already there or pulled in the bundle
1119 if pullop.remotebookmarks is not None:
1117 if pullop.remotebookmarks is not None:
1120 _pullbookmarks(pullop)
1118 _pullbookmarks(pullop)
1121
1119
1122 def _pullbundle2extraprepare(pullop, kwargs):
1120 def _pullbundle2extraprepare(pullop, kwargs):
1123 """hook function so that extensions can extend the getbundle call"""
1121 """hook function so that extensions can extend the getbundle call"""
1124 pass
1122 pass
1125
1123
1126 def _pullchangeset(pullop):
1124 def _pullchangeset(pullop):
1127 """pull changeset from unbundle into the local repo"""
1125 """pull changeset from unbundle into the local repo"""
1128 # We delay the open of the transaction as late as possible so we
1126 # We delay the open of the transaction as late as possible so we
1129 # don't open transaction for nothing or you break future useful
1127 # don't open transaction for nothing or you break future useful
1130 # rollback call
1128 # rollback call
1131 if 'changegroup' in pullop.stepsdone:
1129 if 'changegroup' in pullop.stepsdone:
1132 return
1130 return
1133 pullop.stepsdone.add('changegroup')
1131 pullop.stepsdone.add('changegroup')
1134 if not pullop.fetch:
1132 if not pullop.fetch:
1135 pullop.repo.ui.status(_("no changes found\n"))
1133 pullop.repo.ui.status(_("no changes found\n"))
1136 pullop.cgresult = 0
1134 pullop.cgresult = 0
1137 return
1135 return
1138 pullop.gettransaction()
1136 pullop.gettransaction()
1139 if pullop.heads is None and list(pullop.common) == [nullid]:
1137 if pullop.heads is None and list(pullop.common) == [nullid]:
1140 pullop.repo.ui.status(_("requesting all changes\n"))
1138 pullop.repo.ui.status(_("requesting all changes\n"))
1141 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1139 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1142 # issue1320, avoid a race if remote changed after discovery
1140 # issue1320, avoid a race if remote changed after discovery
1143 pullop.heads = pullop.rheads
1141 pullop.heads = pullop.rheads
1144
1142
1145 if pullop.remote.capable('getbundle'):
1143 if pullop.remote.capable('getbundle'):
1146 # TODO: get bundlecaps from remote
1144 # TODO: get bundlecaps from remote
1147 cg = pullop.remote.getbundle('pull', common=pullop.common,
1145 cg = pullop.remote.getbundle('pull', common=pullop.common,
1148 heads=pullop.heads or pullop.rheads)
1146 heads=pullop.heads or pullop.rheads)
1149 elif pullop.heads is None:
1147 elif pullop.heads is None:
1150 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1148 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1151 elif not pullop.remote.capable('changegroupsubset'):
1149 elif not pullop.remote.capable('changegroupsubset'):
1152 raise util.Abort(_("partial pull cannot be done because "
1150 raise util.Abort(_("partial pull cannot be done because "
1153 "other repository doesn't support "
1151 "other repository doesn't support "
1154 "changegroupsubset."))
1152 "changegroupsubset."))
1155 else:
1153 else:
1156 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1154 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1157 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1155 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1158 pullop.remote.url())
1156 pullop.remote.url())
1159
1157
1160 def _pullphase(pullop):
1158 def _pullphase(pullop):
1161 # Get remote phases data from remote
1159 # Get remote phases data from remote
1162 if 'phases' in pullop.stepsdone:
1160 if 'phases' in pullop.stepsdone:
1163 return
1161 return
1164 remotephases = pullop.remote.listkeys('phases')
1162 remotephases = pullop.remote.listkeys('phases')
1165 _pullapplyphases(pullop, remotephases)
1163 _pullapplyphases(pullop, remotephases)
1166
1164
1167 def _pullapplyphases(pullop, remotephases):
1165 def _pullapplyphases(pullop, remotephases):
1168 """apply phase movement from observed remote state"""
1166 """apply phase movement from observed remote state"""
1169 if 'phases' in pullop.stepsdone:
1167 if 'phases' in pullop.stepsdone:
1170 return
1168 return
1171 pullop.stepsdone.add('phases')
1169 pullop.stepsdone.add('phases')
1172 publishing = bool(remotephases.get('publishing', False))
1170 publishing = bool(remotephases.get('publishing', False))
1173 if remotephases and not publishing:
1171 if remotephases and not publishing:
1174 # remote is new and unpublishing
1172 # remote is new and unpublishing
1175 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1173 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1176 pullop.pulledsubset,
1174 pullop.pulledsubset,
1177 remotephases)
1175 remotephases)
1178 dheads = pullop.pulledsubset
1176 dheads = pullop.pulledsubset
1179 else:
1177 else:
1180 # Remote is old or publishing all common changesets
1178 # Remote is old or publishing all common changesets
1181 # should be seen as public
1179 # should be seen as public
1182 pheads = pullop.pulledsubset
1180 pheads = pullop.pulledsubset
1183 dheads = []
1181 dheads = []
1184 unfi = pullop.repo.unfiltered()
1182 unfi = pullop.repo.unfiltered()
1185 phase = unfi._phasecache.phase
1183 phase = unfi._phasecache.phase
1186 rev = unfi.changelog.nodemap.get
1184 rev = unfi.changelog.nodemap.get
1187 public = phases.public
1185 public = phases.public
1188 draft = phases.draft
1186 draft = phases.draft
1189
1187
1190 # exclude changesets already public locally and update the others
1188 # exclude changesets already public locally and update the others
1191 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1189 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1192 if pheads:
1190 if pheads:
1193 tr = pullop.gettransaction()
1191 tr = pullop.gettransaction()
1194 phases.advanceboundary(pullop.repo, tr, public, pheads)
1192 phases.advanceboundary(pullop.repo, tr, public, pheads)
1195
1193
1196 # exclude changesets already draft locally and update the others
1194 # exclude changesets already draft locally and update the others
1197 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1195 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1198 if dheads:
1196 if dheads:
1199 tr = pullop.gettransaction()
1197 tr = pullop.gettransaction()
1200 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1198 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1201
1199
1202 def _pullbookmarks(pullop):
1200 def _pullbookmarks(pullop):
1203 """process the remote bookmark information to update the local one"""
1201 """process the remote bookmark information to update the local one"""
1204 if 'bookmarks' in pullop.stepsdone:
1202 if 'bookmarks' in pullop.stepsdone:
1205 return
1203 return
1206 pullop.stepsdone.add('bookmarks')
1204 pullop.stepsdone.add('bookmarks')
1207 repo = pullop.repo
1205 repo = pullop.repo
1208 remotebookmarks = pullop.remotebookmarks
1206 remotebookmarks = pullop.remotebookmarks
1209 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1207 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1210 pullop.remote.url(),
1208 pullop.remote.url(),
1211 pullop.gettransaction,
1209 pullop.gettransaction,
1212 explicit=pullop.explicitbookmarks)
1210 explicit=pullop.explicitbookmarks)
1213
1211
1214 def _pullobsolete(pullop):
1212 def _pullobsolete(pullop):
1215 """utility function to pull obsolete markers from a remote
1213 """utility function to pull obsolete markers from a remote
1216
1214
1217 The `gettransaction` is function that return the pull transaction, creating
1215 The `gettransaction` is function that return the pull transaction, creating
1218 one if necessary. We return the transaction to inform the calling code that
1216 one if necessary. We return the transaction to inform the calling code that
1219 a new transaction have been created (when applicable).
1217 a new transaction have been created (when applicable).
1220
1218
1221 Exists mostly to allow overriding for experimentation purpose"""
1219 Exists mostly to allow overriding for experimentation purpose"""
1222 if 'obsmarkers' in pullop.stepsdone:
1220 if 'obsmarkers' in pullop.stepsdone:
1223 return
1221 return
1224 pullop.stepsdone.add('obsmarkers')
1222 pullop.stepsdone.add('obsmarkers')
1225 tr = None
1223 tr = None
1226 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1224 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1227 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1225 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1228 remoteobs = pullop.remote.listkeys('obsolete')
1226 remoteobs = pullop.remote.listkeys('obsolete')
1229 if 'dump0' in remoteobs:
1227 if 'dump0' in remoteobs:
1230 tr = pullop.gettransaction()
1228 tr = pullop.gettransaction()
1231 for key in sorted(remoteobs, reverse=True):
1229 for key in sorted(remoteobs, reverse=True):
1232 if key.startswith('dump'):
1230 if key.startswith('dump'):
1233 data = base85.b85decode(remoteobs[key])
1231 data = base85.b85decode(remoteobs[key])
1234 pullop.repo.obsstore.mergemarkers(tr, data)
1232 pullop.repo.obsstore.mergemarkers(tr, data)
1235 pullop.repo.invalidatevolatilesets()
1233 pullop.repo.invalidatevolatilesets()
1236 return tr
1234 return tr
1237
1235
1238 def caps20to10(repo):
1236 def caps20to10(repo):
1239 """return a set with appropriate options to use bundle20 during getbundle"""
1237 """return a set with appropriate options to use bundle20 during getbundle"""
1240 caps = set(['HG20'])
1238 caps = set(['HG20'])
1241 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1239 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1242 caps.add('bundle2=' + urllib.quote(capsblob))
1240 caps.add('bundle2=' + urllib.quote(capsblob))
1243 return caps
1241 return caps
1244
1242
1245 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1243 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1246 getbundle2partsorder = []
1244 getbundle2partsorder = []
1247
1245
1248 # Mapping between step name and function
1246 # Mapping between step name and function
1249 #
1247 #
1250 # This exists to help extensions wrap steps if necessary
1248 # This exists to help extensions wrap steps if necessary
1251 getbundle2partsmapping = {}
1249 getbundle2partsmapping = {}
1252
1250
1253 def getbundle2partsgenerator(stepname, idx=None):
1251 def getbundle2partsgenerator(stepname, idx=None):
1254 """decorator for function generating bundle2 part for getbundle
1252 """decorator for function generating bundle2 part for getbundle
1255
1253
1256 The function is added to the step -> function mapping and appended to the
1254 The function is added to the step -> function mapping and appended to the
1257 list of steps. Beware that decorated functions will be added in order
1255 list of steps. Beware that decorated functions will be added in order
1258 (this may matter).
1256 (this may matter).
1259
1257
1260 You can only use this decorator for new steps, if you want to wrap a step
1258 You can only use this decorator for new steps, if you want to wrap a step
1261 from an extension, attack the getbundle2partsmapping dictionary directly."""
1259 from an extension, attack the getbundle2partsmapping dictionary directly."""
1262 def dec(func):
1260 def dec(func):
1263 assert stepname not in getbundle2partsmapping
1261 assert stepname not in getbundle2partsmapping
1264 getbundle2partsmapping[stepname] = func
1262 getbundle2partsmapping[stepname] = func
1265 if idx is None:
1263 if idx is None:
1266 getbundle2partsorder.append(stepname)
1264 getbundle2partsorder.append(stepname)
1267 else:
1265 else:
1268 getbundle2partsorder.insert(idx, stepname)
1266 getbundle2partsorder.insert(idx, stepname)
1269 return func
1267 return func
1270 return dec
1268 return dec
1271
1269
1272 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1270 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1273 **kwargs):
1271 **kwargs):
1274 """return a full bundle (with potentially multiple kind of parts)
1272 """return a full bundle (with potentially multiple kind of parts)
1275
1273
1276 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1274 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1277 passed. For now, the bundle can contain only changegroup, but this will
1275 passed. For now, the bundle can contain only changegroup, but this will
1278 changes when more part type will be available for bundle2.
1276 changes when more part type will be available for bundle2.
1279
1277
1280 This is different from changegroup.getchangegroup that only returns an HG10
1278 This is different from changegroup.getchangegroup that only returns an HG10
1281 changegroup bundle. They may eventually get reunited in the future when we
1279 changegroup bundle. They may eventually get reunited in the future when we
1282 have a clearer idea of the API we what to query different data.
1280 have a clearer idea of the API we what to query different data.
1283
1281
1284 The implementation is at a very early stage and will get massive rework
1282 The implementation is at a very early stage and will get massive rework
1285 when the API of bundle is refined.
1283 when the API of bundle is refined.
1286 """
1284 """
1287 # bundle10 case
1285 # bundle10 case
1288 usebundle2 = False
1286 usebundle2 = False
1289 if bundlecaps is not None:
1287 if bundlecaps is not None:
1290 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1288 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1291 if not usebundle2:
1289 if not usebundle2:
1292 if bundlecaps and not kwargs.get('cg', True):
1290 if bundlecaps and not kwargs.get('cg', True):
1293 raise ValueError(_('request for bundle10 must include changegroup'))
1291 raise ValueError(_('request for bundle10 must include changegroup'))
1294
1292
1295 if kwargs:
1293 if kwargs:
1296 raise ValueError(_('unsupported getbundle arguments: %s')
1294 raise ValueError(_('unsupported getbundle arguments: %s')
1297 % ', '.join(sorted(kwargs.keys())))
1295 % ', '.join(sorted(kwargs.keys())))
1298 return changegroup.getchangegroup(repo, source, heads=heads,
1296 return changegroup.getchangegroup(repo, source, heads=heads,
1299 common=common, bundlecaps=bundlecaps)
1297 common=common, bundlecaps=bundlecaps)
1300
1298
1301 # bundle20 case
1299 # bundle20 case
1302 b2caps = {}
1300 b2caps = {}
1303 for bcaps in bundlecaps:
1301 for bcaps in bundlecaps:
1304 if bcaps.startswith('bundle2='):
1302 if bcaps.startswith('bundle2='):
1305 blob = urllib.unquote(bcaps[len('bundle2='):])
1303 blob = urllib.unquote(bcaps[len('bundle2='):])
1306 b2caps.update(bundle2.decodecaps(blob))
1304 b2caps.update(bundle2.decodecaps(blob))
1307 bundler = bundle2.bundle20(repo.ui, b2caps)
1305 bundler = bundle2.bundle20(repo.ui, b2caps)
1308
1306
1309 kwargs['heads'] = heads
1307 kwargs['heads'] = heads
1310 kwargs['common'] = common
1308 kwargs['common'] = common
1311
1309
1312 for name in getbundle2partsorder:
1310 for name in getbundle2partsorder:
1313 func = getbundle2partsmapping[name]
1311 func = getbundle2partsmapping[name]
1314 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1312 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1315 **kwargs)
1313 **kwargs)
1316
1314
1317 return util.chunkbuffer(bundler.getchunks())
1315 return util.chunkbuffer(bundler.getchunks())
1318
1316
1319 @getbundle2partsgenerator('changegroup')
1317 @getbundle2partsgenerator('changegroup')
1320 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1318 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1321 b2caps=None, heads=None, common=None, **kwargs):
1319 b2caps=None, heads=None, common=None, **kwargs):
1322 """add a changegroup part to the requested bundle"""
1320 """add a changegroup part to the requested bundle"""
1323 cg = None
1321 cg = None
1324 if kwargs.get('cg', True):
1322 if kwargs.get('cg', True):
1325 # build changegroup bundle here.
1323 # build changegroup bundle here.
1326 version = None
1324 version = None
1327 cgversions = b2caps.get('changegroup')
1325 cgversions = b2caps.get('changegroup')
1328 getcgkwargs = {}
1326 getcgkwargs = {}
1329 if cgversions: # 3.1 and 3.2 ship with an empty value
1327 if cgversions: # 3.1 and 3.2 ship with an empty value
1330 cgversions = [v for v in cgversions if v in changegroup.packermap]
1328 cgversions = [v for v in cgversions if v in changegroup.packermap]
1331 if not cgversions:
1329 if not cgversions:
1332 raise ValueError(_('no common changegroup version'))
1330 raise ValueError(_('no common changegroup version'))
1333 version = getcgkwargs['version'] = max(cgversions)
1331 version = getcgkwargs['version'] = max(cgversions)
1334 outgoing = changegroup.computeoutgoing(repo, heads, common)
1332 outgoing = changegroup.computeoutgoing(repo, heads, common)
1335 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1333 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1336 bundlecaps=bundlecaps,
1334 bundlecaps=bundlecaps,
1337 **getcgkwargs)
1335 **getcgkwargs)
1338
1336
1339 if cg:
1337 if cg:
1340 part = bundler.newpart('changegroup', data=cg)
1338 part = bundler.newpart('changegroup', data=cg)
1341 if version is not None:
1339 if version is not None:
1342 part.addparam('version', version)
1340 part.addparam('version', version)
1343 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1341 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1344
1342
1345 @getbundle2partsgenerator('listkeys')
1343 @getbundle2partsgenerator('listkeys')
1346 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1344 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1347 b2caps=None, **kwargs):
1345 b2caps=None, **kwargs):
1348 """add parts containing listkeys namespaces to the requested bundle"""
1346 """add parts containing listkeys namespaces to the requested bundle"""
1349 listkeys = kwargs.get('listkeys', ())
1347 listkeys = kwargs.get('listkeys', ())
1350 for namespace in listkeys:
1348 for namespace in listkeys:
1351 part = bundler.newpart('listkeys')
1349 part = bundler.newpart('listkeys')
1352 part.addparam('namespace', namespace)
1350 part.addparam('namespace', namespace)
1353 keys = repo.listkeys(namespace).items()
1351 keys = repo.listkeys(namespace).items()
1354 part.data = pushkey.encodekeys(keys)
1352 part.data = pushkey.encodekeys(keys)
1355
1353
1356 @getbundle2partsgenerator('obsmarkers')
1354 @getbundle2partsgenerator('obsmarkers')
1357 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1355 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1358 b2caps=None, heads=None, **kwargs):
1356 b2caps=None, heads=None, **kwargs):
1359 """add an obsolescence markers part to the requested bundle"""
1357 """add an obsolescence markers part to the requested bundle"""
1360 if kwargs.get('obsmarkers', False):
1358 if kwargs.get('obsmarkers', False):
1361 if heads is None:
1359 if heads is None:
1362 heads = repo.heads()
1360 heads = repo.heads()
1363 subset = [c.node() for c in repo.set('::%ln', heads)]
1361 subset = [c.node() for c in repo.set('::%ln', heads)]
1364 markers = repo.obsstore.relevantmarkers(subset)
1362 markers = repo.obsstore.relevantmarkers(subset)
1365 markers = sorted(markers)
1363 markers = sorted(markers)
1366 buildobsmarkerspart(bundler, markers)
1364 buildobsmarkerspart(bundler, markers)
1367
1365
1368 @getbundle2partsgenerator('hgtagsfnodes')
1366 @getbundle2partsgenerator('hgtagsfnodes')
1369 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1367 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1370 b2caps=None, heads=None, common=None,
1368 b2caps=None, heads=None, common=None,
1371 **kwargs):
1369 **kwargs):
1372 """Transfer the .hgtags filenodes mapping.
1370 """Transfer the .hgtags filenodes mapping.
1373
1371
1374 Only values for heads in this bundle will be transferred.
1372 Only values for heads in this bundle will be transferred.
1375
1373
1376 The part data consists of pairs of 20 byte changeset node and .hgtags
1374 The part data consists of pairs of 20 byte changeset node and .hgtags
1377 filenodes raw values.
1375 filenodes raw values.
1378 """
1376 """
1379 # Don't send unless:
1377 # Don't send unless:
1380 # - changeset are being exchanged,
1378 # - changeset are being exchanged,
1381 # - the client supports it.
1379 # - the client supports it.
1382 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1380 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1383 return
1381 return
1384
1382
1385 outgoing = changegroup.computeoutgoing(repo, heads, common)
1383 outgoing = changegroup.computeoutgoing(repo, heads, common)
1386
1384
1387 if not outgoing.missingheads:
1385 if not outgoing.missingheads:
1388 return
1386 return
1389
1387
1390 cache = tags.hgtagsfnodescache(repo.unfiltered())
1388 cache = tags.hgtagsfnodescache(repo.unfiltered())
1391 chunks = []
1389 chunks = []
1392
1390
1393 # .hgtags fnodes are only relevant for head changesets. While we could
1391 # .hgtags fnodes are only relevant for head changesets. While we could
1394 # transfer values for all known nodes, there will likely be little to
1392 # transfer values for all known nodes, there will likely be little to
1395 # no benefit.
1393 # no benefit.
1396 #
1394 #
1397 # We don't bother using a generator to produce output data because
1395 # We don't bother using a generator to produce output data because
1398 # a) we only have 40 bytes per head and even esoteric numbers of heads
1396 # a) we only have 40 bytes per head and even esoteric numbers of heads
1399 # consume little memory (1M heads is 40MB) b) we don't want to send the
1397 # consume little memory (1M heads is 40MB) b) we don't want to send the
1400 # part if we don't have entries and knowing if we have entries requires
1398 # part if we don't have entries and knowing if we have entries requires
1401 # cache lookups.
1399 # cache lookups.
1402 for node in outgoing.missingheads:
1400 for node in outgoing.missingheads:
1403 # Don't compute missing, as this may slow down serving.
1401 # Don't compute missing, as this may slow down serving.
1404 fnode = cache.getfnode(node, computemissing=False)
1402 fnode = cache.getfnode(node, computemissing=False)
1405 if fnode is not None:
1403 if fnode is not None:
1406 chunks.extend([node, fnode])
1404 chunks.extend([node, fnode])
1407
1405
1408 if chunks:
1406 if chunks:
1409 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1407 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1410
1408
1411 def check_heads(repo, their_heads, context):
1409 def check_heads(repo, their_heads, context):
1412 """check if the heads of a repo have been modified
1410 """check if the heads of a repo have been modified
1413
1411
1414 Used by peer for unbundling.
1412 Used by peer for unbundling.
1415 """
1413 """
1416 heads = repo.heads()
1414 heads = repo.heads()
1417 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1415 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1418 if not (their_heads == ['force'] or their_heads == heads or
1416 if not (their_heads == ['force'] or their_heads == heads or
1419 their_heads == ['hashed', heads_hash]):
1417 their_heads == ['hashed', heads_hash]):
1420 # someone else committed/pushed/unbundled while we
1418 # someone else committed/pushed/unbundled while we
1421 # were transferring data
1419 # were transferring data
1422 raise error.PushRaced('repository changed while %s - '
1420 raise error.PushRaced('repository changed while %s - '
1423 'please try again' % context)
1421 'please try again' % context)
1424
1422
1425 def unbundle(repo, cg, heads, source, url):
1423 def unbundle(repo, cg, heads, source, url):
1426 """Apply a bundle to a repo.
1424 """Apply a bundle to a repo.
1427
1425
1428 this function makes sure the repo is locked during the application and have
1426 this function makes sure the repo is locked during the application and have
1429 mechanism to check that no push race occurred between the creation of the
1427 mechanism to check that no push race occurred between the creation of the
1430 bundle and its application.
1428 bundle and its application.
1431
1429
1432 If the push was raced as PushRaced exception is raised."""
1430 If the push was raced as PushRaced exception is raised."""
1433 r = 0
1431 r = 0
1434 # need a transaction when processing a bundle2 stream
1432 # need a transaction when processing a bundle2 stream
1435 wlock = lock = tr = None
1433 wlock = lock = tr = None
1436 recordout = None
1434 recordout = None
1437 # quick fix for output mismatch with bundle2 in 3.4
1435 # quick fix for output mismatch with bundle2 in 3.4
1438 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1436 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1439 False)
1437 False)
1440 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1438 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1441 captureoutput = True
1439 captureoutput = True
1442 try:
1440 try:
1443 check_heads(repo, heads, 'uploading changes')
1441 check_heads(repo, heads, 'uploading changes')
1444 # push can proceed
1442 # push can proceed
1445 if util.safehasattr(cg, 'params'):
1443 if util.safehasattr(cg, 'params'):
1446 r = None
1444 r = None
1447 try:
1445 try:
1448 wlock = repo.wlock()
1446 wlock = repo.wlock()
1449 lock = repo.lock()
1447 lock = repo.lock()
1450 tr = repo.transaction(source)
1448 tr = repo.transaction(source)
1451 tr.hookargs['source'] = source
1449 tr.hookargs['source'] = source
1452 tr.hookargs['url'] = url
1450 tr.hookargs['url'] = url
1453 tr.hookargs['bundle2'] = '1'
1451 tr.hookargs['bundle2'] = '1'
1454 op = bundle2.bundleoperation(repo, lambda: tr,
1452 op = bundle2.bundleoperation(repo, lambda: tr,
1455 captureoutput=captureoutput)
1453 captureoutput=captureoutput)
1456 try:
1454 try:
1457 op = bundle2.processbundle(repo, cg, op=op)
1455 op = bundle2.processbundle(repo, cg, op=op)
1458 finally:
1456 finally:
1459 r = op.reply
1457 r = op.reply
1460 if captureoutput and r is not None:
1458 if captureoutput and r is not None:
1461 repo.ui.pushbuffer(error=True, subproc=True)
1459 repo.ui.pushbuffer(error=True, subproc=True)
1462 def recordout(output):
1460 def recordout(output):
1463 r.newpart('output', data=output, mandatory=False)
1461 r.newpart('output', data=output, mandatory=False)
1464 tr.close()
1462 tr.close()
1465 except BaseException as exc:
1463 except BaseException as exc:
1466 exc.duringunbundle2 = True
1464 exc.duringunbundle2 = True
1467 if captureoutput and r is not None:
1465 if captureoutput and r is not None:
1468 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1466 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1469 def recordout(output):
1467 def recordout(output):
1470 part = bundle2.bundlepart('output', data=output,
1468 part = bundle2.bundlepart('output', data=output,
1471 mandatory=False)
1469 mandatory=False)
1472 parts.append(part)
1470 parts.append(part)
1473 raise
1471 raise
1474 else:
1472 else:
1475 lock = repo.lock()
1473 lock = repo.lock()
1476 r = changegroup.addchangegroup(repo, cg, source, url)
1474 r = changegroup.addchangegroup(repo, cg, source, url)
1477 finally:
1475 finally:
1478 lockmod.release(tr, lock, wlock)
1476 lockmod.release(tr, lock, wlock)
1479 if recordout is not None:
1477 if recordout is not None:
1480 recordout(repo.ui.popbuffer())
1478 recordout(repo.ui.popbuffer())
1481 return r
1479 return r
@@ -1,282 +1,287 b''
1 # streamclone.py - producing and consuming streaming repository data
1 # streamclone.py - producing and consuming streaming repository data
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import time
10 import time
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 branchmap,
14 branchmap,
15 error,
15 error,
16 store,
16 store,
17 util,
17 util,
18 )
18 )
19
19
20 def canperformstreamclone(repo, remote, heads, streamrequested=None):
20 def canperformstreamclone(repo, remote, heads, streamrequested=None):
21 """Whether it is possible to perform a streaming clone as part of pull.
21 """Whether it is possible to perform a streaming clone as part of pull.
22
22
23 Returns a tuple of (supported, requirements). ``supported`` is True if
23 Returns a tuple of (supported, requirements). ``supported`` is True if
24 streaming clone is supported and False otherwise. ``requirements`` is
24 streaming clone is supported and False otherwise. ``requirements`` is
25 a set of repo requirements from the remote, or ``None`` if stream clone
25 a set of repo requirements from the remote, or ``None`` if stream clone
26 isn't supported.
26 isn't supported.
27 """
27 """
28 # Streaming clone only works on empty repositories.
28 # Streaming clone only works on empty repositories.
29 if len(repo):
29 if len(repo):
30 return False, None
30 return False, None
31
31
32 # Streaming clone only works if all data is being requested.
32 # Streaming clone only works if all data is being requested.
33 if heads:
33 if heads:
34 return False, None
34 return False, None
35
35
36 # If we don't have a preference, let the server decide for us. This
36 # If we don't have a preference, let the server decide for us. This
37 # likely only comes into play in LANs.
37 # likely only comes into play in LANs.
38 if streamrequested is None:
38 if streamrequested is None:
39 # The server can advertise whether to prefer streaming clone.
39 # The server can advertise whether to prefer streaming clone.
40 streamrequested = remote.capable('stream-preferred')
40 streamrequested = remote.capable('stream-preferred')
41
41
42 if not streamrequested:
42 if not streamrequested:
43 return False, None
43 return False, None
44
44
45 # In order for stream clone to work, the client has to support all the
45 # In order for stream clone to work, the client has to support all the
46 # requirements advertised by the server.
46 # requirements advertised by the server.
47 #
47 #
48 # The server advertises its requirements via the "stream" and "streamreqs"
48 # The server advertises its requirements via the "stream" and "streamreqs"
49 # capability. "stream" (a value-less capability) is advertised if and only
49 # capability. "stream" (a value-less capability) is advertised if and only
50 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
50 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
51 # is advertised and contains a comma-delimited list of requirements.
51 # is advertised and contains a comma-delimited list of requirements.
52 requirements = set()
52 requirements = set()
53 if remote.capable('stream'):
53 if remote.capable('stream'):
54 requirements.add('revlogv1')
54 requirements.add('revlogv1')
55 else:
55 else:
56 streamreqs = remote.capable('streamreqs')
56 streamreqs = remote.capable('streamreqs')
57 # This is weird and shouldn't happen with modern servers.
57 # This is weird and shouldn't happen with modern servers.
58 if not streamreqs:
58 if not streamreqs:
59 return False, None
59 return False, None
60
60
61 streamreqs = set(streamreqs.split(','))
61 streamreqs = set(streamreqs.split(','))
62 # Server requires something we don't support. Bail.
62 # Server requires something we don't support. Bail.
63 if streamreqs - repo.supportedformats:
63 if streamreqs - repo.supportedformats:
64 return False, None
64 return False, None
65 requirements = streamreqs
65 requirements = streamreqs
66
66
67 return True, requirements
67 return True, requirements
68
68
69 def maybeperformstreamclone(repo, remote, heads, stream):
69 def maybeperformstreamclone(pullop):
70 supported, requirements = canperformstreamclone(repo, remote, heads,
70 repo = pullop.repo
71 streamrequested=stream)
71 remote = pullop.remote
72
73 r = canperformstreamclone(repo, remote, pullop.heads,
74 streamrequested=pullop.streamclonerequested)
75 supported, requirements = r
76
72 if not supported:
77 if not supported:
73 return
78 return
74
79
75 streamin(repo, remote, requirements)
80 streamin(repo, remote, requirements)
76
81
77 def allowservergeneration(ui):
82 def allowservergeneration(ui):
78 """Whether streaming clones are allowed from the server."""
83 """Whether streaming clones are allowed from the server."""
79 return ui.configbool('server', 'uncompressed', True, untrusted=True)
84 return ui.configbool('server', 'uncompressed', True, untrusted=True)
80
85
81 # This is it's own function so extensions can override it.
86 # This is it's own function so extensions can override it.
82 def _walkstreamfiles(repo):
87 def _walkstreamfiles(repo):
83 return repo.store.walk()
88 return repo.store.walk()
84
89
85 def generatev1(repo):
90 def generatev1(repo):
86 """Emit content for version 1 of a streaming clone.
91 """Emit content for version 1 of a streaming clone.
87
92
88 This is a generator of raw chunks that constitute a streaming clone.
93 This is a generator of raw chunks that constitute a streaming clone.
89
94
90 The stream begins with a line of 2 space-delimited integers containing the
95 The stream begins with a line of 2 space-delimited integers containing the
91 number of entries and total bytes size.
96 number of entries and total bytes size.
92
97
93 Next, are N entries for each file being transferred. Each file entry starts
98 Next, are N entries for each file being transferred. Each file entry starts
94 as a line with the file name and integer size delimited by a null byte.
99 as a line with the file name and integer size delimited by a null byte.
95 The raw file data follows. Following the raw file data is the next file
100 The raw file data follows. Following the raw file data is the next file
96 entry, or EOF.
101 entry, or EOF.
97
102
98 When used on the wire protocol, an additional line indicating protocol
103 When used on the wire protocol, an additional line indicating protocol
99 success will be prepended to the stream. This function is not responsible
104 success will be prepended to the stream. This function is not responsible
100 for adding it.
105 for adding it.
101
106
102 This function will obtain a repository lock to ensure a consistent view of
107 This function will obtain a repository lock to ensure a consistent view of
103 the store is captured. It therefore may raise LockError.
108 the store is captured. It therefore may raise LockError.
104 """
109 """
105 entries = []
110 entries = []
106 total_bytes = 0
111 total_bytes = 0
107 # Get consistent snapshot of repo, lock during scan.
112 # Get consistent snapshot of repo, lock during scan.
108 lock = repo.lock()
113 lock = repo.lock()
109 try:
114 try:
110 repo.ui.debug('scanning\n')
115 repo.ui.debug('scanning\n')
111 for name, ename, size in _walkstreamfiles(repo):
116 for name, ename, size in _walkstreamfiles(repo):
112 if size:
117 if size:
113 entries.append((name, size))
118 entries.append((name, size))
114 total_bytes += size
119 total_bytes += size
115 finally:
120 finally:
116 lock.release()
121 lock.release()
117
122
118 repo.ui.debug('%d files, %d bytes to transfer\n' %
123 repo.ui.debug('%d files, %d bytes to transfer\n' %
119 (len(entries), total_bytes))
124 (len(entries), total_bytes))
120 yield '%d %d\n' % (len(entries), total_bytes)
125 yield '%d %d\n' % (len(entries), total_bytes)
121
126
122 svfs = repo.svfs
127 svfs = repo.svfs
123 oldaudit = svfs.mustaudit
128 oldaudit = svfs.mustaudit
124 debugflag = repo.ui.debugflag
129 debugflag = repo.ui.debugflag
125 svfs.mustaudit = False
130 svfs.mustaudit = False
126
131
127 try:
132 try:
128 for name, size in entries:
133 for name, size in entries:
129 if debugflag:
134 if debugflag:
130 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
135 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
131 # partially encode name over the wire for backwards compat
136 # partially encode name over the wire for backwards compat
132 yield '%s\0%d\n' % (store.encodedir(name), size)
137 yield '%s\0%d\n' % (store.encodedir(name), size)
133 if size <= 65536:
138 if size <= 65536:
134 fp = svfs(name)
139 fp = svfs(name)
135 try:
140 try:
136 data = fp.read(size)
141 data = fp.read(size)
137 finally:
142 finally:
138 fp.close()
143 fp.close()
139 yield data
144 yield data
140 else:
145 else:
141 for chunk in util.filechunkiter(svfs(name), limit=size):
146 for chunk in util.filechunkiter(svfs(name), limit=size):
142 yield chunk
147 yield chunk
143 finally:
148 finally:
144 svfs.mustaudit = oldaudit
149 svfs.mustaudit = oldaudit
145
150
146 def consumev1(repo, fp):
151 def consumev1(repo, fp):
147 """Apply the contents from version 1 of a streaming clone file handle.
152 """Apply the contents from version 1 of a streaming clone file handle.
148
153
149 This takes the output from "streamout" and applies it to the specified
154 This takes the output from "streamout" and applies it to the specified
150 repository.
155 repository.
151
156
152 Like "streamout," the status line added by the wire protocol is not handled
157 Like "streamout," the status line added by the wire protocol is not handled
153 by this function.
158 by this function.
154 """
159 """
155 lock = repo.lock()
160 lock = repo.lock()
156 try:
161 try:
157 repo.ui.status(_('streaming all changes\n'))
162 repo.ui.status(_('streaming all changes\n'))
158 l = fp.readline()
163 l = fp.readline()
159 try:
164 try:
160 total_files, total_bytes = map(int, l.split(' ', 1))
165 total_files, total_bytes = map(int, l.split(' ', 1))
161 except (ValueError, TypeError):
166 except (ValueError, TypeError):
162 raise error.ResponseError(
167 raise error.ResponseError(
163 _('unexpected response from remote server:'), l)
168 _('unexpected response from remote server:'), l)
164 repo.ui.status(_('%d files to transfer, %s of data\n') %
169 repo.ui.status(_('%d files to transfer, %s of data\n') %
165 (total_files, util.bytecount(total_bytes)))
170 (total_files, util.bytecount(total_bytes)))
166 handled_bytes = 0
171 handled_bytes = 0
167 repo.ui.progress(_('clone'), 0, total=total_bytes)
172 repo.ui.progress(_('clone'), 0, total=total_bytes)
168 start = time.time()
173 start = time.time()
169
174
170 tr = repo.transaction(_('clone'))
175 tr = repo.transaction(_('clone'))
171 try:
176 try:
172 for i in xrange(total_files):
177 for i in xrange(total_files):
173 # XXX doesn't support '\n' or '\r' in filenames
178 # XXX doesn't support '\n' or '\r' in filenames
174 l = fp.readline()
179 l = fp.readline()
175 try:
180 try:
176 name, size = l.split('\0', 1)
181 name, size = l.split('\0', 1)
177 size = int(size)
182 size = int(size)
178 except (ValueError, TypeError):
183 except (ValueError, TypeError):
179 raise error.ResponseError(
184 raise error.ResponseError(
180 _('unexpected response from remote server:'), l)
185 _('unexpected response from remote server:'), l)
181 if repo.ui.debugflag:
186 if repo.ui.debugflag:
182 repo.ui.debug('adding %s (%s)\n' %
187 repo.ui.debug('adding %s (%s)\n' %
183 (name, util.bytecount(size)))
188 (name, util.bytecount(size)))
184 # for backwards compat, name was partially encoded
189 # for backwards compat, name was partially encoded
185 ofp = repo.svfs(store.decodedir(name), 'w')
190 ofp = repo.svfs(store.decodedir(name), 'w')
186 for chunk in util.filechunkiter(fp, limit=size):
191 for chunk in util.filechunkiter(fp, limit=size):
187 handled_bytes += len(chunk)
192 handled_bytes += len(chunk)
188 repo.ui.progress(_('clone'), handled_bytes,
193 repo.ui.progress(_('clone'), handled_bytes,
189 total=total_bytes)
194 total=total_bytes)
190 ofp.write(chunk)
195 ofp.write(chunk)
191 ofp.close()
196 ofp.close()
192 tr.close()
197 tr.close()
193 finally:
198 finally:
194 tr.release()
199 tr.release()
195
200
196 # Writing straight to files circumvented the inmemory caches
201 # Writing straight to files circumvented the inmemory caches
197 repo.invalidate()
202 repo.invalidate()
198
203
199 elapsed = time.time() - start
204 elapsed = time.time() - start
200 if elapsed <= 0:
205 if elapsed <= 0:
201 elapsed = 0.001
206 elapsed = 0.001
202 repo.ui.progress(_('clone'), None)
207 repo.ui.progress(_('clone'), None)
203 repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
208 repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
204 (util.bytecount(total_bytes), elapsed,
209 (util.bytecount(total_bytes), elapsed,
205 util.bytecount(total_bytes / elapsed)))
210 util.bytecount(total_bytes / elapsed)))
206 finally:
211 finally:
207 lock.release()
212 lock.release()
208
213
209 def streamin(repo, remote, remotereqs):
214 def streamin(repo, remote, remotereqs):
210 # Save remote branchmap. We will use it later
215 # Save remote branchmap. We will use it later
211 # to speed up branchcache creation
216 # to speed up branchcache creation
212 rbranchmap = None
217 rbranchmap = None
213 if remote.capable("branchmap"):
218 if remote.capable("branchmap"):
214 rbranchmap = remote.branchmap()
219 rbranchmap = remote.branchmap()
215
220
216 fp = remote.stream_out()
221 fp = remote.stream_out()
217 l = fp.readline()
222 l = fp.readline()
218 try:
223 try:
219 resp = int(l)
224 resp = int(l)
220 except ValueError:
225 except ValueError:
221 raise error.ResponseError(
226 raise error.ResponseError(
222 _('unexpected response from remote server:'), l)
227 _('unexpected response from remote server:'), l)
223 if resp == 1:
228 if resp == 1:
224 raise util.Abort(_('operation forbidden by server'))
229 raise util.Abort(_('operation forbidden by server'))
225 elif resp == 2:
230 elif resp == 2:
226 raise util.Abort(_('locking the remote repository failed'))
231 raise util.Abort(_('locking the remote repository failed'))
227 elif resp != 0:
232 elif resp != 0:
228 raise util.Abort(_('the server sent an unknown error code'))
233 raise util.Abort(_('the server sent an unknown error code'))
229
234
230 applyremotedata(repo, remotereqs, rbranchmap, fp)
235 applyremotedata(repo, remotereqs, rbranchmap, fp)
231 return len(repo.heads()) + 1
236 return len(repo.heads()) + 1
232
237
233 def applyremotedata(repo, remotereqs, remotebranchmap, fp):
238 def applyremotedata(repo, remotereqs, remotebranchmap, fp):
234 """Apply stream clone data to a repository.
239 """Apply stream clone data to a repository.
235
240
236 "remotereqs" is a set of requirements to handle the incoming data.
241 "remotereqs" is a set of requirements to handle the incoming data.
237 "remotebranchmap" is the result of a branchmap lookup on the remote. It
242 "remotebranchmap" is the result of a branchmap lookup on the remote. It
238 can be None.
243 can be None.
239 "fp" is a file object containing the raw stream data, suitable for
244 "fp" is a file object containing the raw stream data, suitable for
240 feeding into consumev1().
245 feeding into consumev1().
241 """
246 """
242 lock = repo.lock()
247 lock = repo.lock()
243 try:
248 try:
244 consumev1(repo, fp)
249 consumev1(repo, fp)
245
250
246 # new requirements = old non-format requirements +
251 # new requirements = old non-format requirements +
247 # new format-related remote requirements
252 # new format-related remote requirements
248 # requirements from the streamed-in repository
253 # requirements from the streamed-in repository
249 repo.requirements = remotereqs | (
254 repo.requirements = remotereqs | (
250 repo.requirements - repo.supportedformats)
255 repo.requirements - repo.supportedformats)
251 repo._applyopenerreqs()
256 repo._applyopenerreqs()
252 repo._writerequirements()
257 repo._writerequirements()
253
258
254 if remotebranchmap:
259 if remotebranchmap:
255 rbheads = []
260 rbheads = []
256 closed = []
261 closed = []
257 for bheads in remotebranchmap.itervalues():
262 for bheads in remotebranchmap.itervalues():
258 rbheads.extend(bheads)
263 rbheads.extend(bheads)
259 for h in bheads:
264 for h in bheads:
260 r = repo.changelog.rev(h)
265 r = repo.changelog.rev(h)
261 b, c = repo.changelog.branchinfo(r)
266 b, c = repo.changelog.branchinfo(r)
262 if c:
267 if c:
263 closed.append(h)
268 closed.append(h)
264
269
265 if rbheads:
270 if rbheads:
266 rtiprev = max((int(repo.changelog.rev(node))
271 rtiprev = max((int(repo.changelog.rev(node))
267 for node in rbheads))
272 for node in rbheads))
268 cache = branchmap.branchcache(remotebranchmap,
273 cache = branchmap.branchcache(remotebranchmap,
269 repo[rtiprev].node(),
274 repo[rtiprev].node(),
270 rtiprev,
275 rtiprev,
271 closednodes=closed)
276 closednodes=closed)
272 # Try to stick it as low as possible
277 # Try to stick it as low as possible
273 # filter above served are unlikely to be fetch from a clone
278 # filter above served are unlikely to be fetch from a clone
274 for candidate in ('base', 'immutable', 'served'):
279 for candidate in ('base', 'immutable', 'served'):
275 rview = repo.filtered(candidate)
280 rview = repo.filtered(candidate)
276 if cache.validfor(rview):
281 if cache.validfor(rview):
277 repo._branchcaches[candidate] = cache
282 repo._branchcaches[candidate] = cache
278 cache.write(rview)
283 cache.write(rview)
279 break
284 break
280 repo.invalidate()
285 repo.invalidate()
281 finally:
286 finally:
282 lock.release()
287 lock.release()
General Comments 0
You need to be logged in to leave comments. Login now