pull: move obsolescence marker exchange in the exchange module...
Pierre-Yves David, r20476:1180c6ec default
mercurial/exchange.py
@@ -1,484 +1,506 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno
-import util, scmutil, changegroup
+import util, scmutil, changegroup, base85
import discovery, phases, obsolete, bookmarks


class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop.force, pushop.revs)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret

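A minimal driving sketch, not part of this changeset, of how the push()
entry point above is typically reached; it assumes a local repository and
a peer obtained through mercurial.hg, and the path and URL are hypothetical.

    from mercurial import hg, ui as uimod, exchange

    myui = uimod.ui()
    repo = hg.repository(myui, '/path/to/local/repo')        # hypothetical path
    remote = hg.peer(myui, {}, 'ssh://hg.example.com/repo')  # hypothetical URL
    # None: nothing to push, 0: HTTP error, 1: remote head count unchanged
    # or push refused, otherwise the addchangegroup() return value
    ret = exchange.push(repo, remote, force=False, revs=None, newbranch=False)
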
def _pushdiscovery(pushop):
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are defined here to respect the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are going to push, and there is at least one obsolete
            # or unstable changeset in missing, then at least one of the
            # missing heads will be obsolete or unstable. So checking
            # heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = pushop.repo._changegroupsubset(outgoing,
                                            bundler,
                                            'push',
                                            fastpath=True)
    else:
        cg = pushop.repo.getlocalbundle('push', outgoing, bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                  pushop.repo.url())

def _pushcomputecommonheads(pushop):
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded, synchronize on the target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed. synchronize on everything common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads

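A self-contained toy sketch, not part of this changeset, of the set
identity described in the comment above, cheads = heads(::missingheads
and ::commonheads); the DAG, node names and head sets are made up.

    # parents of each node in a tiny DAG: Z <- A <- B, and Z <- C
    parents = {'Z': [], 'A': ['Z'], 'B': ['A'], 'C': ['Z']}

    def ancestors(nodes):
        """return nodes plus all their ancestors (the '::nodes' revset)"""
        seen = set()
        stack = list(nodes)
        while stack:
            n = stack.pop()
            if n not in seen:
                seen.add(n)
                stack.extend(parents[n])
        return seen

    def heads(nodes):
        """nodes with no child inside the same set"""
        return set(n for n in nodes
                   if not any(n in parents[m] for m in nodes))

    commonheads = set(['A'])        # remote already has Z and A
    missingheads = set(['B', 'C'])  # we are pushing B and C

    cheads = heads(ancestors(missingheads) & ancestors(commonheads))
    assert cheads == set(['A'])
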
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    if pushop.ret:
        # push succeeded, synchronize on the target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed. synchronize on everything common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only repo
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs that are draft on the remote but
        # public here.
        # XXX Beware that the revset breaks if droots is not strictly
        # XXX a set of roots; we may want to ensure it is, but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

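A minimal sketch, not part of this changeset, of what the pushkey
round-trip above amounts to, using an in-memory stand-in for the peer;
the node hash is made up.

    from mercurial import phases

    class fakeremote(object):
        """records pushkey calls instead of talking to a server"""
        def __init__(self):
            self.calls = []
        def pushkey(self, namespace, key, old, new):
            self.calls.append((namespace, key, old, new))
            return True  # a real server returns a false value on failure

    remote = fakeremote()
    newremotehead = 'f' * 40  # hypothetical hex node
    r = remote.pushkey('phases', newremotehead,
                       str(phases.draft), str(phases.public))
    assert r and remote.calls == [
        ('phases', newremotehead, str(phases.draft), str(phases.public))]
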
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

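A self-contained sketch, not part of this changeset, of the pushkey loop
above: local marker parts are exposed as 'dump0', 'dump1', ... keys and
pushed in reverse-sorted order so that 'dump0' is sent last, matching the
comment in the code; the payloads are made up.

    localdata = {'dump0': '<header+markers>', 'dump1': '<header+markers>'}

    pushed = []
    def pushkey(namespace, key, old, new):
        pushed.append(key)
        return True

    rslts = []
    for key in sorted(localdata, reverse=True):
        rslts.append(pushkey('obsolete', key, '', localdata[key]))
    assert pushed == ['dump1', 'dump0']
    assert not [r for r in rslts if not r]  # nothing failed
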
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force

def pull(repo, remote, heads=None, force=False):
-    pullop = pulloperation(repo, remote, heads)
+    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # don't open transaction for nothing or you break future useful
    # rollback call
    tr = None
    trname = 'pull\n' + util.hidepassword(pullop.remote.url())
    lock = pullop.repo.lock()
    try:
        tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                           pullop.remote,
                                           heads=pullop.heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            pullop.repo.ui.status(_("no changes found\n"))
            result = 0
        else:
            tr = pullop.repo.transaction(trname)
            if pullop.heads is None and list(common) == [nullid]:
                pullop.repo.ui.status(_("requesting all changes\n"))
            elif (pullop.heads is None
                  and pullop.remote.capable('changegroupsubset')):
                # issue1320, avoid a race if remote changed after discovery
                pullop.heads = rheads

            if pullop.remote.capable('getbundle'):
                # TODO: get bundlecaps from remote
                cg = pullop.remote.getbundle('pull', common=common,
                                             heads=pullop.heads or rheads)
            elif pullop.heads is None:
                cg = pullop.remote.changegroup(fetch, 'pull')
            elif not pullop.remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = pullop.remote.changegroupsubset(fetch, pullop.heads,
                                                     'pull')
            result = pullop.repo.addchangegroup(cg, 'pull',
                                                pullop.remote.url())

        # compute target subset
        if pullop.heads is None:
            # We pulled everything possible
            # sync on everything common
            subset = common + rheads
        else:
            # We pulled a specific subset
            # sync on this subset
            subset = pullop.heads

        # Get remote phases data from remote
        remotephases = pullop.remote.listkeys('phases')
        publishing = bool(remotephases.get('publishing', False))
        if remotephases and not publishing:
            # remote is new and non-publishing
            pheads, _dr = phases.analyzeremotephases(pullop.repo, subset,
                                                     remotephases)
            phases.advanceboundary(pullop.repo, phases.public, pheads)
            phases.advanceboundary(pullop.repo, phases.draft, subset)
        else:
            # Remote is old or publishing; all common changesets
            # should be seen as public
            phases.advanceboundary(pullop.repo, phases.public, subset)

        def gettransaction():
            if tr is None:
                return pullop.repo.transaction(trname)
            return tr

-        obstr = obsolete.syncpull(pullop.repo, pullop.remote, gettransaction)
+        obstr = _pullobsolete(pullop.repo, pullop.remote, gettransaction)
        if obstr is not None:
            tr = obstr

        if tr is not None:
            tr.close()
    finally:
        if tr is not None:
            tr.release()
        lock.release()

    return result
+
+def _pullobsolete(repo, remote, gettransaction):
+    """utility function to pull obsolete markers from a remote
+
+    `gettransaction` is a function that returns the pull transaction,
+    creating one if necessary. We return the transaction to inform the
+    calling code that a new transaction has been created (when applicable).
+
+    Exists mostly to allow overriding for experimentation purposes"""
+    tr = None
+    if obsolete._enabled:
+        repo.ui.debug('fetching remote obsolete markers\n')
+        remoteobs = remote.listkeys('obsolete')
+        if 'dump0' in remoteobs:
+            tr = gettransaction()
+            for key in sorted(remoteobs, reverse=True):
+                if key.startswith('dump'):
+                    data = base85.b85decode(remoteobs[key])
+                    repo.obsstore.mergemarkers(tr, data)
+            repo.invalidatevolatilesets()
+    return tr
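A self-contained sketch, not part of this changeset, of the
lazy-transaction pattern shared by pull() and _pullobsolete(): the
callback only opens a transaction on first demand, and the caller takes
ownership of any newly created transaction so it can close or release it.

    class faketransaction(object):
        """stand-in for a repository transaction"""
        def close(self):
            pass
        def release(self):
            pass

    tr = None  # nothing opened yet; opening eagerly would break rollback

    def gettransaction():
        # mirrors the closure defined inside pull() above: a transaction
        # is only created when a consumer actually needs one
        if tr is None:
            return faketransaction()
        return tr

    obstr = gettransaction()   # what _pullobsolete() does when markers arrive
    if obstr is not None:
        tr = obstr             # pull() now owns the transaction
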
mercurial/obsolete.py
@@ -1,864 +1,843 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete markers handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
building new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by a changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the
  "divergence" case. If two independent operations rewrite the same
  changeset A into A' and A'', we have an error case: divergent
  rewriting. We can detect it because two markers will be created
  independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bits integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changesets identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed   = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#  o  A' (bumped)
#  |`:
#  | o  A
#  |/
#  o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#  o   Ad
#  |`:
#  | x A'
#  |'|
#  o | A
#  |/
#  o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped versions and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # split on the first ':' only; values may contain ':'
            key, value = l.split(':', 1)
            d[key] = value
    return d

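A short round-trip, not part of this changeset, of the metadata encoding
scheme implemented above, runnable against this module; the metadata
values are made up.

    meta = {'user': 'alice <alice@example.com>', 'date': '0 0'}
    blob = encodemeta(meta)
    # keys are sorted, entries are '\0'-separated, and key and value are
    # joined by the first ':'
    assert blob == 'date:0 0\x00user:alice <alice@example.com>'
    assert decodemeta(blob) == meta
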
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related cache
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering out duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        return len(new)

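A self-contained sketch, not part of this changeset, of the
append-position pitfall the comment above works around (issue3543):
after opening in 'ab' mode, tell() may legally report 0 until you seek
to the end, so the transaction would record a bogus truncation offset.

    import os, tempfile

    path = tempfile.mktemp()
    f = open(path, 'wb')
    f.write('12345')               # pre-existing store content
    f.close()

    f = open(path, 'ab')
    f.seek(0, 2)                   # 2 == os.SEEK_END, as _SEEK_END above
    offset = f.tell()
    assert offset == 5             # a safe rollback offset for the journal
    f.write('appended')
    f.close()
    os.unlink(path)
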
    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

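A short round-trip, not part of this changeset, using the helpers above
together with _readmarkers from earlier in this module; the node values
are made up.

    m = ('p' * 20, ('s' * 20,), 0, encodemeta({'date': '0 0'}))
    raw = ''.join(_encodemarkers([m], addheader=True))
    assert list(_readmarkers(raw)) == [m]
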
340 # arbitrary picked to fit into 8K limit from HTTP server
340 # arbitrary picked to fit into 8K limit from HTTP server
341 # you have to take in account:
341 # you have to take in account:
342 # - the version header
342 # - the version header
343 # - the base85 encoding
343 # - the base85 encoding
344 _maxpayload = 5300
344 _maxpayload = 5300

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %s') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

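# demo (editor's sketch, not part of this change): a client-side round trip
# over pushkey, with `remote` standing in for a hypothetical peer object:
#
#     for key, value in sorted(listmarkers(repo).items()):
#         remote.pushkey('obsolete', key, '', value)  # old value must be ''
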
def syncpull(repo, remote, gettransaction):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` argument is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    tr = None
    if _enabled:
        repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
    return tr

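# demo (editor's sketch, not part of this change): the `gettransaction`
# callable expected above is typically a lazy factory, so no transaction is
# opened when the remote advertises no markers. A minimal stand-in:
def _demomaketransactiongetter(repo):
    state = []  # one-slot cache so the transaction is created at most once
    def gettransaction():
        if not state:
            state.append(repo.transaction('pull'))
        return state[0]
    return gettransaction
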
def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

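# demo (editor's sketch, not part of this change): walking successors with a
# minimal stub obsstore. A -> B, then B is split into C and D; the walk yields
# everything reachable through successor edges, initial node included.
class _demostore(object):
    successors = {'A': [('A', ('B',), 0, '')],
                  'B': [('B', ('C', 'D'), 0, '')]}
assert set(allsuccessors(_demostore(), ['A'])) == set(['A', 'B', 'C', 'D'])
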
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


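# demo (editor's sketch, not part of this change): foreground() above is a
# fixpoint iteration -- alternately close the set under descendants and under
# successors until it stops growing. The same shape in miniature:
def _demofixpoint(seed, step):
    cur, prev = set(seed), None
    while cur != prev:
        prev = set(cur)
        cur |= step(cur)  # one closure pass; step returns reachable extras
    return cur
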
def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of the initial node

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successors set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successors
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.

    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here too
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

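# demo (editor's sketch, not part of this change): shapes of the returned
# value, matching the docstring above (A, A', A'', B' are hypothetical nodes):
#
#     successorssets(repo, A) == [(A,)]           # A is not obsolete
#     successorssets(repo, A) == [(A',)]          # plain replacement
#     successorssets(repo, A) == []               # A was pruned
#     successorssets(repo, A) == [(A', B')]       # A was split in two
#     successorssets(repo, A) == [(A',), (A'',)]  # divergent rewrites
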
def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

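# demo (editor's sketch, not part of this change): registering and querying a
# hypothetical cached set with the decorator above.
#
#     @cachefor('everything')
#     def _computeeverythingset(repo):
#         return set(repo)  # computed once, then cached on the obsstore
#
#     revs = getrevs(repo, 'everything')
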
# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in obsstore if the obsstore already exists on the
    repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for node in repo.obsstore.successors:
        rev = getrev(node)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util functions (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and each `new` are changectx objects.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for prec, sucs in relations:
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
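
# demo (editor's sketch, not part of this change): recording that changeset
# `old` was rewritten into `new` (hypothetical changectx objects):
#
#     createmarkers(repo, [(old, (new,))])  # plain replacement
#     createmarkers(repo, [(old, ())])      # prune: obsolete `old` outright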