##// END OF EJS Templates
exchange: fix pyflakes import complaint
Matt Mackall -
r20974:ef377f2e default
parent child Browse files
Show More
@@ -1,648 +1,647 b''
1 # exchange.py - utility to exchange data between repositories.
1 # exchange.py - utility to exchange data between repositories.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import sys
9 from i18n import _
8 from i18n import _
10 from node import hex, nullid
9 from node import hex, nullid
11 import cStringIO
10 import cStringIO
12 import errno
11 import errno
13 import util, scmutil, changegroup, base85
12 import util, scmutil, changegroup, base85
14 import discovery, phases, obsolete, bookmarks, bundle2
13 import discovery, phases, obsolete, bookmarks, bundle2
15
14
16
15
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
56 def push(repo, remote, force=False, revs=None, newbranch=False):
55 def push(repo, remote, force=False, revs=None, newbranch=False):
57 '''Push outgoing changesets (limited by revs) from a local
56 '''Push outgoing changesets (limited by revs) from a local
58 repository to remote. Return an integer:
57 repository to remote. Return an integer:
59 - None means nothing to push
58 - None means nothing to push
60 - 0 means HTTP error
59 - 0 means HTTP error
61 - 1 means we pushed and remote head count is unchanged *or*
60 - 1 means we pushed and remote head count is unchanged *or*
62 we have outgoing changesets but refused to push
61 we have outgoing changesets but refused to push
63 - other values as described by addchangegroup()
62 - other values as described by addchangegroup()
64 '''
63 '''
65 pushop = pushoperation(repo, remote, force, revs, newbranch)
64 pushop = pushoperation(repo, remote, force, revs, newbranch)
66 if pushop.remote.local():
65 if pushop.remote.local():
67 missing = (set(pushop.repo.requirements)
66 missing = (set(pushop.repo.requirements)
68 - pushop.remote.local().supported)
67 - pushop.remote.local().supported)
69 if missing:
68 if missing:
70 msg = _("required features are not"
69 msg = _("required features are not"
71 " supported in the destination:"
70 " supported in the destination:"
72 " %s") % (', '.join(sorted(missing)))
71 " %s") % (', '.join(sorted(missing)))
73 raise util.Abort(msg)
72 raise util.Abort(msg)
74
73
75 # there are two ways to push to remote repo:
74 # there are two ways to push to remote repo:
76 #
75 #
77 # addchangegroup assumes local user can lock remote
76 # addchangegroup assumes local user can lock remote
78 # repo (local filesystem, old ssh servers).
77 # repo (local filesystem, old ssh servers).
79 #
78 #
80 # unbundle assumes local user cannot lock remote repo (new ssh
79 # unbundle assumes local user cannot lock remote repo (new ssh
81 # servers, http servers).
80 # servers, http servers).
82
81
83 if not pushop.remote.canpush():
82 if not pushop.remote.canpush():
84 raise util.Abort(_("destination does not support push"))
83 raise util.Abort(_("destination does not support push"))
85 # get local lock as we might write phase data
84 # get local lock as we might write phase data
86 locallock = None
85 locallock = None
87 try:
86 try:
88 locallock = pushop.repo.lock()
87 locallock = pushop.repo.lock()
89 pushop.locallocked = True
88 pushop.locallocked = True
90 except IOError, err:
89 except IOError, err:
91 pushop.locallocked = False
90 pushop.locallocked = False
92 if err.errno != errno.EACCES:
91 if err.errno != errno.EACCES:
93 raise
92 raise
94 # source repo cannot be locked.
93 # source repo cannot be locked.
95 # We do not abort the push, but just disable the local phase
94 # We do not abort the push, but just disable the local phase
96 # synchronisation.
95 # synchronisation.
97 msg = 'cannot lock source repository: %s\n' % err
96 msg = 'cannot lock source repository: %s\n' % err
98 pushop.ui.debug(msg)
97 pushop.ui.debug(msg)
99 try:
98 try:
100 pushop.repo.checkpush(pushop)
99 pushop.repo.checkpush(pushop)
101 lock = None
100 lock = None
102 unbundle = pushop.remote.capable('unbundle')
101 unbundle = pushop.remote.capable('unbundle')
103 if not unbundle:
102 if not unbundle:
104 lock = pushop.remote.lock()
103 lock = pushop.remote.lock()
105 try:
104 try:
106 _pushdiscovery(pushop)
105 _pushdiscovery(pushop)
107 if _pushcheckoutgoing(pushop):
106 if _pushcheckoutgoing(pushop):
108 _pushchangeset(pushop)
107 _pushchangeset(pushop)
109 _pushcomputecommonheads(pushop)
108 _pushcomputecommonheads(pushop)
110 _pushsyncphase(pushop)
109 _pushsyncphase(pushop)
111 _pushobsolete(pushop)
110 _pushobsolete(pushop)
112 finally:
111 finally:
113 if lock is not None:
112 if lock is not None:
114 lock.release()
113 lock.release()
115 finally:
114 finally:
116 if locallock is not None:
115 if locallock is not None:
117 locallock.release()
116 locallock.release()
118
117
119 _pushbookmark(pushop)
118 _pushbookmark(pushop)
120 return pushop.ret
119 return pushop.ret
121
120
def _pushdiscovery(pushop):
    """Run changeset discovery against the remote.

    Fills in pushop.outgoing, pushop.remoteheads and pushop.incoming.
    """
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing them.

    Return False when there is nothing to push, True when the push may
    proceed; raise util.Abort when the push must be refused (obsolete or
    troubled heads, or new remote heads without --force).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missingheads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo.

    Stores the remote's result code in pushop.ret.
    """
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                  pushop.repo.url())
def _pushcomputecommonheads(pushop):
    """Compute the heads common to both sides after the push.

    Stores the result in pushop.commonheads for later phase
    synchronisation.
    """
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    # push() always runs _pushcomputecommonheads() right before this
    # function, so the common heads are already available on pushop;
    # the historical verbatim duplicate of that computation is dropped.
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only repo
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote but public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    # only push markers when the feature is enabled, we have markers,
    # and the remote advertises the obsolete namespace
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    # only bookmarks that advance on our side are pushed
    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            # bookmark points outside the pushed set; skip it
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
def pull(repo, remote, heads=None, force=False):
    """Pull changesets (limited by heads) from remote into repo.

    Return the changegroup result code (see pulloperation.cgresult).
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # pulling from a local peer: refuse when the source has
        # requirements the local repository does not support
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if pullop.remote.capable('bundle2'):
            _pullbundle2(pullop)
        # bundle2 may have handled some steps already; only run the
        # ones still listed in todosteps
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle
    all discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(), pullop.remote,
        heads=pullop.heads, force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
484
483
485 def _pullbundle2(pullop):
484 def _pullbundle2(pullop):
486 """pull data using bundle2
485 """pull data using bundle2
487
486
488 For now, the only supported data are changegroup."""
487 For now, the only supported data are changegroup."""
489 kwargs = {'bundlecaps': set(['HG20'])}
488 kwargs = {'bundlecaps': set(['HG20'])}
490 # pulling changegroup
489 # pulling changegroup
491 pullop.todosteps.remove('changegroup')
490 pullop.todosteps.remove('changegroup')
492 if not pullop.fetch:
491 if not pullop.fetch:
493 pullop.repo.ui.status(_("no changes found\n"))
492 pullop.repo.ui.status(_("no changes found\n"))
494 pullop.cgresult = 0
493 pullop.cgresult = 0
495 else:
494 else:
496 kwargs['common'] = pullop.common
495 kwargs['common'] = pullop.common
497 kwargs['heads'] = pullop.heads or pullop.rheads
496 kwargs['heads'] = pullop.heads or pullop.rheads
498 if pullop.heads is None and list(pullop.common) == [nullid]:
497 if pullop.heads is None and list(pullop.common) == [nullid]:
499 pullop.repo.ui.status(_("requesting all changes\n"))
498 pullop.repo.ui.status(_("requesting all changes\n"))
500 if kwargs.keys() == ['format']:
499 if kwargs.keys() == ['format']:
501 return # nothing to pull
500 return # nothing to pull
502 bundle = pullop.remote.getbundle('pull', **kwargs)
501 bundle = pullop.remote.getbundle('pull', **kwargs)
503 try:
502 try:
504 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
503 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
505 except KeyError, exc:
504 except KeyError, exc:
506 raise util.Abort('missing support for %s' % exc)
505 raise util.Abort('missing support for %s' % exc)
507 assert len(op.records['changegroup']) == 1
506 assert len(op.records['changegroup']) == 1
508 pullop.cgresult = op.records['changegroup'][0]['return']
507 pullop.cgresult = op.records['changegroup'][0]['return']
509
508
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # Open the transaction as late as possible so we never create one
    # for nothing (an early empty transaction would break useful
    # rollback calls).
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    remote = pullop.remote
    if pullop.heads is None:
        if list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
        elif remote.capable('changegroupsubset'):
            # issue1320, avoid a race if remote changed after discovery
            pullop.heads = pullop.rheads

    if remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        bundle = remote.getbundle('pull', common=pullop.common,
                                  heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        bundle = remote.changegroup(pullop.fetch, 'pull')
    elif not remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        bundle = remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, bundle, 'pull',
                                                 remote.url())
541
540
def _pullphase(pullop):
    """process phase data pulled from the remote"""
    # Get remote phases data from remote
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    subset = pullop.pulledsubset
    if publishing or not remotephases:
        # Remote is old or publishing all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public, subset)
    else:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo, subset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft, subset)
560
559
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    Returns the pull transaction when one had to be opened to store the
    markers, and None otherwise, so the calling code knows whether a new
    transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    if not obsolete._enabled:
        return None
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' not in remoteobs:
        return None
    tr = pullop.gettransaction()
    # markers are spread over several 'dumpN' keys; apply newest first
    for key in sorted(remoteobs, reverse=True):
        if key.startswith('dump'):
            pullop.repo.obsstore.mergemarkers(
                tr, base85.b85decode(remoteobs[key]))
    pullop.repo.invalidatevolatilesets()
    return tr
582
581
def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    import os
    # build bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG20' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    bundler = bundle2.bundle20(repo.ui)
    tempname = changegroup.writebundle(cg, None, 'HG10UN')
    # read the bundle back in binary mode, then close the descriptor and
    # delete the temporary file (previously both were leaked)
    fp = open(tempname, 'rb')
    try:
        data = fp.read()
    finally:
        fp.close()
    os.unlink(tempname)
    part = bundle2.part('changegroup', data=data)
    bundler.addpart(part)
    temp = cStringIO.StringIO()
    for c in bundler.getchunks():
        temp.write(c)
    temp.seek(0)
    return bundle2.unbundle20(repo.ui, temp)
614
613
class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicates a push race"""
617
616
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = util.sha1(''.join(sorted(current))).digest()
    if (their_heads == ['force'] or their_heads == current
            or their_heads == ['hashed', digest]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise PushRaced('repository changed while %s - '
                    'please try again' % context)
631
630
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application
    and has a mechanism to check that no push race occurred between the
    creation of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    result = 0
    lck = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        result = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        lck.release()
    return result
General Comments 0
You need to be logged in to leave comments. Login now