pull: move phases synchronisation into its own function...
Pierre-Yves David
r20486:0c469df6 default
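This changeset extracts the phase-synchronisation step of pull() into a dedicated _pullphase(pullop) helper, mirroring the existing push-side helpers (_pushsyncphase, _pushobsolete). Splitting the step out also makes it a separately overridable unit, in the same spirit as the "allow overriding for experimentation" note on _pullobsolete. A minimal, hypothetical extension sketch (not part of this changeset) that wraps the new helper through the standard mercurial.extensions.wrapfunction hook:

    # hypothetical extension: log when the pull-side phase synchronisation runs
    from mercurial import exchange, extensions

    def _loggedpullphase(orig, pullop):
        pullop.repo.ui.debug('synchronising phases after pull\n')
        return orig(pullop)

    def uisetup(ui):
        # wrap the helper introduced by this changeset
        extensions.wrapfunction(exchange, '_pullphase', _loggedpullphase)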
@@ -1,521 +1,523 @@
 # exchange.py - utility to exchange data between repos.
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 from node import hex, nullid
 import errno
 import util, scmutil, changegroup, base85
 import discovery, phases, obsolete, bookmarks


 class pushoperation(object):
     """An object that represents a single push operation

     Its purpose is to carry push-related state and very common operations.

     A new one should be created at the beginning of each push and discarded
     afterward.
     """

     def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
         # repo we push from
         self.repo = repo
         self.ui = repo.ui
         # repo we push to
         self.remote = remote
         # force option provided
         self.force = force
         # revs to be pushed (None is "all")
         self.revs = revs
         # allow push of new branch
         self.newbranch = newbranch
         # did a local lock get acquired?
         self.locallocked = None
         # Integer version of the push result
         # - None means nothing to push
         # - 0 means HTTP error
         # - 1 means we pushed and remote head count is unchanged *or*
         #   we have outgoing changesets but refused to push
         # - other values as described by addchangegroup()
         self.ret = None
         # discovery.outgoing object (contains common and outgoing data)
         self.outgoing = None
         # all remote heads before the push
         self.remoteheads = None
         # testable as a boolean indicating if any nodes are missing locally.
         self.incoming = None
         # set of all heads common after changeset bundle push
         self.commonheads = None

 def push(repo, remote, force=False, revs=None, newbranch=False):
     '''Push outgoing changesets (limited by revs) from a local
     repository to remote. Return an integer:
       - None means nothing to push
       - 0 means HTTP error
       - 1 means we pushed and remote head count is unchanged *or*
         we have outgoing changesets but refused to push
       - other values as described by addchangegroup()
     '''
     pushop = pushoperation(repo, remote, force, revs, newbranch)
     if pushop.remote.local():
         missing = (set(pushop.repo.requirements)
                    - pushop.remote.local().supported)
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise util.Abort(msg)

     # there are two ways to push to remote repo:
     #
     # addchangegroup assumes local user can lock remote
     # repo (local filesystem, old ssh servers).
     #
     # unbundle assumes local user cannot lock remote repo (new ssh
     # servers, http servers).

     if not pushop.remote.canpush():
         raise util.Abort(_("destination does not support push"))
     # get local lock as we might write phase data
     locallock = None
     try:
         locallock = pushop.repo.lock()
         pushop.locallocked = True
     except IOError, err:
         pushop.locallocked = False
         if err.errno != errno.EACCES:
             raise
         # source repo cannot be locked.
         # We do not abort the push, but just disable the local phase
         # synchronisation.
         msg = 'cannot lock source repository: %s\n' % err
         pushop.ui.debug(msg)
     try:
         pushop.repo.checkpush(pushop.force, pushop.revs)
         lock = None
         unbundle = pushop.remote.capable('unbundle')
         if not unbundle:
             lock = pushop.remote.lock()
         try:
             _pushdiscovery(pushop)
             if _pushcheckoutgoing(pushop):
                 _pushchangeset(pushop)
             _pushcomputecommonheads(pushop)
             _pushsyncphase(pushop)
             _pushobsolete(pushop)
         finally:
             if lock is not None:
                 lock.release()
     finally:
         if locallock is not None:
             locallock.release()

     _pushbookmark(pushop)
     return pushop.ret

 def _pushdiscovery(pushop):
     # discovery
     unfi = pushop.repo.unfiltered()
     fci = discovery.findcommonincoming
     commoninc = fci(unfi, pushop.remote, force=pushop.force)
     common, inc, remoteheads = commoninc
     fco = discovery.findcommonoutgoing
     outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                    commoninc=commoninc, force=pushop.force)
     pushop.outgoing = outgoing
     pushop.remoteheads = remoteheads
     pushop.incoming = inc

 def _pushcheckoutgoing(pushop):
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
     if not outgoing.missing:
         # nothing to push
         scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
         return False
     # something to push
     if not pushop.force:
         # if repo.obsstore == False --> no obsolete
         # then, save the iteration
         if unfi.obsstore:
             # these messages are here for 80 char limit reasons
             mso = _("push includes obsolete changeset: %s!")
             mst = "push includes %s changeset: %s!"
             # plain versions for i18n tool to detect them
             _("push includes unstable changeset: %s!")
             _("push includes bumped changeset: %s!")
             _("push includes divergent changeset: %s!")
             # If we are to push, and there is at least one
             # obsolete or unstable changeset in missing, then at
             # least one of the missing heads will be obsolete or
             # unstable. So checking heads only is ok
             for node in outgoing.missingheads:
                 ctx = unfi[node]
                 if ctx.obsolete():
                     raise util.Abort(mso % ctx)
                 elif ctx.troubled():
                     raise util.Abort(_(mst)
                                      % (ctx.troubles()[0],
                                         ctx))
         newbm = pushop.ui.configlist('bookmarks', 'pushing')
         discovery.checkheads(unfi, pushop.remote, outgoing,
                              pushop.remoteheads,
                              pushop.newbranch,
                              bool(pushop.incoming),
                              newbm)
     return True

 def _pushchangeset(pushop):
     """Make the actual push of changeset bundle to remote repo"""
     outgoing = pushop.outgoing
     unbundle = pushop.remote.capable('unbundle')
     # TODO: get bundlecaps from remote
     bundlecaps = None
     # create a changegroup from local
     if pushop.revs is None and not (outgoing.excluded
                                     or pushop.repo.changelog.filteredrevs):
         # push everything,
         # use the fast path, no race possible on push
         bundler = changegroup.bundle10(pushop.repo, bundlecaps)
         cg = pushop.repo._changegroupsubset(outgoing,
                                             bundler,
                                             'push',
                                             fastpath=True)
     else:
         cg = pushop.repo.getlocalbundle('push', outgoing, bundlecaps)

     # apply changegroup to remote
     if unbundle:
         # local repo finds heads on server, finds out what
         # revs it must push. once revs transferred, if server
         # finds it has different heads (someone else won
         # commit/push race), server aborts.
         if pushop.force:
             remoteheads = ['force']
         else:
             remoteheads = pushop.remoteheads
         # ssh: return remote's addchangegroup()
         # http: return remote's addchangegroup() or 0 for error
         pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                             'push')
     else:
         # we return an integer indicating remote head count
         # change
         pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                   pushop.repo.url())

 def _pushcomputecommonheads(pushop):
     unfi = pushop.repo.unfiltered()
     if pushop.ret:
         # push succeeded, synchronize target of the push
         cheads = pushop.outgoing.missingheads
     elif pushop.revs is None:
         # All-out push failed. synchronize all common
         cheads = pushop.outgoing.commonheads
     else:
         # I want cheads = heads(::missingheads and ::commonheads)
         # (missingheads is revs with secret changeset filtered out)
         #
         # This can be expressed as:
         #     cheads = ( (missingheads and ::commonheads)
         #              + (commonheads and ::missingheads))
         #              )
         #
         # while trying to push we already computed the following:
         #     common = (::commonheads)
         #     missing = ((commonheads::missingheads) - commonheads)
         #
         # We can pick:
         # * missingheads part of common (::commonheads)
         common = set(pushop.outgoing.common)
         nm = pushop.repo.changelog.nodemap
         cheads = [node for node in pushop.revs if nm[node] in common]
         # and
         # * commonheads parents on missing
         revset = unfi.set('%ln and parents(roots(%ln))',
                           pushop.outgoing.commonheads,
                           pushop.outgoing.missing)
         cheads.extend(c.node() for c in revset)
     pushop.commonheads = cheads

 def _pushsyncphase(pushop):
     """synchronise phase information locally and remotely"""
     unfi = pushop.repo.unfiltered()
     cheads = pushop.commonheads
     if pushop.ret:
         # push succeeded, synchronize target of the push
         cheads = pushop.outgoing.missingheads
     elif pushop.revs is None:
         # All-out push failed. synchronize all common
         cheads = pushop.outgoing.commonheads
     else:
         # I want cheads = heads(::missingheads and ::commonheads)
         # (missingheads is revs with secret changeset filtered out)
         #
         # This can be expressed as:
         #     cheads = ( (missingheads and ::commonheads)
         #              + (commonheads and ::missingheads))
         #              )
         #
         # while trying to push we already computed the following:
         #     common = (::commonheads)
         #     missing = ((commonheads::missingheads) - commonheads)
         #
         # We can pick:
         # * missingheads part of common (::commonheads)
         common = set(pushop.outgoing.common)
         nm = pushop.repo.changelog.nodemap
         cheads = [node for node in pushop.revs if nm[node] in common]
         # and
         # * commonheads parents on missing
         revset = unfi.set('%ln and parents(roots(%ln))',
                           pushop.outgoing.commonheads,
                           pushop.outgoing.missing)
         cheads.extend(c.node() for c in revset)
     pushop.commonheads = cheads
     # even when we don't push, exchanging phase data is useful
     remotephases = pushop.remote.listkeys('phases')
     if (pushop.ui.configbool('ui', '_usedassubrepo', False)
         and remotephases # server supports phases
         and pushop.ret is None # nothing was pushed
         and remotephases.get('publishing', False)):
         # When:
         # - this is a subrepo push
         # - and remote supports phases
         # - and no changeset was pushed
         # - and remote is publishing
         # We may be in issue 3871 case!
         # We drop the possible phase synchronisation done by
         # courtesy to publish changesets possibly locally draft
         # on the remote.
         remotephases = {'publishing': 'True'}
     if not remotephases: # old server or public only repo
         _localphasemove(pushop, cheads)
         # don't push any phase data as there is nothing to push
     else:
         ana = phases.analyzeremotephases(pushop.repo, cheads,
                                          remotephases)
         pheads, droots = ana
         ### Apply remote phase on local
         if remotephases.get('publishing', False):
             _localphasemove(pushop, cheads)
         else: # publish = False
             _localphasemove(pushop, pheads)
             _localphasemove(pushop, cheads, phases.draft)
         ### Apply local phase on remote

         # Get the list of all revs that are draft on remote but public here.
         # XXX Beware that the revset breaks if droots is not strictly
         # XXX roots; we may want to ensure it is, but it is costly
         outdated = unfi.set('heads((%ln::%ln) and public())',
                             droots, cheads)
         for newremotehead in outdated:
             r = pushop.remote.pushkey('phases',
                                       newremotehead.hex(),
                                       str(phases.draft),
                                       str(phases.public))
             if not r:
                 pushop.ui.warn(_('updating %s to public failed!\n')
                                % newremotehead)

 def _localphasemove(pushop, nodes, phase=phases.public):
     """move <nodes> to <phase> in the local source repo"""
     if pushop.locallocked:
         phases.advanceboundary(pushop.repo, phase, nodes)
     else:
         # repo is not locked, do not change any phases!
         # Informs the user that phases should have been moved when
         # applicable.
         actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
         phasestr = phases.phasenames[phase]
         if actualmoves:
             pushop.ui.status(_('cannot lock source repo, skipping '
                                'local %s phase update\n') % phasestr)

 def _pushobsolete(pushop):
     """utility function to push obsolete markers to a remote"""
     pushop.ui.debug('try to push obsolete markers to remote\n')
     repo = pushop.repo
     remote = pushop.remote
     if (obsolete._enabled and repo.obsstore and
         'obsolete' in remote.listkeys('namespaces')):
         rslts = []
         remotedata = repo.listkeys('obsolete')
         for key in sorted(remotedata, reverse=True):
             # reverse sort to ensure we end with dump0
             data = remotedata[key]
             rslts.append(remote.pushkey('obsolete', key, '', data))
         if [r for r in rslts if not r]:
             msg = _('failed to push some obsolete markers!\n')
             repo.ui.warn(msg)

 def _pushbookmark(pushop):
     """Update bookmark position on remote"""
     ui = pushop.ui
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     ui.debug("checking for updated bookmarks\n")
     revnums = map(repo.changelog.rev, pushop.revs or [])
     ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
     (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
      ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                            srchex=hex)

     for b, scid, dcid in advsrc:
         if ancestors and repo[scid].rev() not in ancestors:
             continue
         if remote.pushkey('bookmarks', b, dcid, scid):
             ui.status(_("updating bookmark %s\n") % b)
         else:
             ui.warn(_('updating bookmark %s failed!\n') % b)

 class pulloperation(object):
     """An object that represents a single pull operation

     Its purpose is to carry pull-related state and very common operations.

     A new one should be created at the beginning of each pull and discarded
     afterward.
     """

     def __init__(self, repo, remote, heads=None, force=False):
         # repo we pull into
         self.repo = repo
         # repo we pull from
         self.remote = remote
         # revision we try to pull (None is "all")
         self.heads = heads
         # do we force pull?
         self.force = force
         # the name of the pull transaction
         self._trname = 'pull\n' + util.hidepassword(remote.url())
         # hold the transaction once created
         self._tr = None
         # heads of the set of changesets targeted by the pull
         self.pulledsubset = None

     def gettransaction(self):
         """get appropriate pull transaction, creating it if needed"""
         if self._tr is None:
             self._tr = self.repo.transaction(self._trname)
         return self._tr

     def closetransaction(self):
         """close transaction if created"""
         if self._tr is not None:
             self._tr.close()

     def releasetransaction(self):
         """release transaction if created"""
         if self._tr is not None:
             self._tr.release()

 def pull(repo, remote, heads=None, force=False):
     pullop = pulloperation(repo, remote, heads, force)
     if pullop.remote.local():
         missing = set(pullop.remote.requirements) - pullop.repo.supported
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise util.Abort(msg)

     lock = pullop.repo.lock()
     try:
         tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                            pullop.remote,
                                            heads=pullop.heads,
                                            force=force)
         common, fetch, rheads = tmp
         if not fetch:
             pullop.repo.ui.status(_("no changes found\n"))
             result = 0
         else:
             # We delay opening the transaction as late as possible so we
             # don't open a transaction for nothing or break future useful
             # rollback calls
             pullop.gettransaction()
             if pullop.heads is None and list(common) == [nullid]:
                 pullop.repo.ui.status(_("requesting all changes\n"))
             elif (pullop.heads is None
                   and pullop.remote.capable('changegroupsubset')):
                 # issue1320, avoid a race if remote changed after discovery
                 pullop.heads = rheads

             if pullop.remote.capable('getbundle'):
                 # TODO: get bundlecaps from remote
                 cg = pullop.remote.getbundle('pull', common=common,
                                              heads=pullop.heads or rheads)
             elif pullop.heads is None:
                 cg = pullop.remote.changegroup(fetch, 'pull')
             elif not pullop.remote.capable('changegroupsubset'):
                 raise util.Abort(_("partial pull cannot be done because "
                                    "other repository doesn't support "
                                    "changegroupsubset."))
             else:
                 cg = pullop.remote.changegroupsubset(fetch, pullop.heads,
                                                      'pull')
             result = pullop.repo.addchangegroup(cg, 'pull',
                                                 pullop.remote.url())

         # compute target subset
         if pullop.heads is None:
             # We pulled everything possible
             # sync on everything common
             subset = common + rheads
         else:
             # We pulled a specific subset
             # sync on this subset
             subset = pullop.heads
         pullop.pulledsubset = subset

-        # Get remote phases data from remote
-        remotephases = pullop.remote.listkeys('phases')
-        publishing = bool(remotephases.get('publishing', False))
-        if remotephases and not publishing:
-            # remote is new and unpublishing
-            pheads, _dr = phases.analyzeremotephases(pullop.repo,
-                                                     pullop.pulledsubset,
-                                                     remotephases)
-            phases.advanceboundary(pullop.repo, phases.public, pheads)
-            phases.advanceboundary(pullop.repo, phases.draft,
-                                   pullop.pulledsubset)
-        else:
-            # Remote is old or publishing all common changesets
-            # should be seen as public
-            phases.advanceboundary(pullop.repo, phases.public,
-                                   pullop.pulledsubset)
-
+        _pullphase(pullop)
         _pullobsolete(pullop)
         pullop.closetransaction()
     finally:
         pullop.releasetransaction()
         lock.release()

     return result

+def _pullphase(pullop):
+    # Get remote phases data from remote
+    remotephases = pullop.remote.listkeys('phases')
+    publishing = bool(remotephases.get('publishing', False))
+    if remotephases and not publishing:
+        # remote is new and unpublishing
+        pheads, _dr = phases.analyzeremotephases(pullop.repo,
+                                                 pullop.pulledsubset,
+                                                 remotephases)
+        phases.advanceboundary(pullop.repo, phases.public, pheads)
+        phases.advanceboundary(pullop.repo, phases.draft,
+                               pullop.pulledsubset)
+    else:
+        # Remote is old or publishing all common changesets
+        # should be seen as public
+        phases.advanceboundary(pullop.repo, phases.public,
+                               pullop.pulledsubset)
+
 def _pullobsolete(pullop):
     """utility function to pull obsolete markers from a remote

     The `gettransaction` function returns the pull transaction, creating one
     if necessary. We return the transaction to inform the calling code that
     a new transaction has been created (when applicable).

     Exists mostly to allow overriding for experimentation purposes"""
     tr = None
     if obsolete._enabled:
         pullop.repo.ui.debug('fetching remote obsolete markers\n')
         remoteobs = pullop.remote.listkeys('obsolete')
         if 'dump0' in remoteobs:
             tr = pullop.gettransaction()
             for key in sorted(remoteobs, reverse=True):
                 if key.startswith('dump'):
                     data = base85.b85decode(remoteobs[key])
                     pullop.repo.obsstore.mergemarkers(tr, data)
             pullop.repo.invalidatevolatilesets()
     return tr

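For context, the pull() entry point defined in this file can be driven from client code roughly as follows. This is a minimal, hypothetical sketch: ui(), hg.repository() and hg.peer() are standard Mercurial helpers that are not part of this diff.

    # hypothetical driver for exchange.pull() as defined above
    from mercurial import ui as uimod, hg, exchange

    def pullfrom(localpath, remoteurl):
        u = uimod.ui()
        repo = hg.repository(u, localpath)   # repo we pull into
        remote = hg.peer(u, {}, remoteurl)   # peer we pull from
        # returns addchangegroup()'s integer result, or 0 when nothing to pull
        return exchange.pull(repo, remote, heads=None, force=False)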