push: move obsolescence marker exchange in the exchange module...
Pierre-Yves David
r20432:1b926f0b default
--- a/mercurial/exchange.py
+++ b/mercurial/exchange.py
@@ -1,279 +1,295 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex
import errno
import util, scmutil, changegroup
import discovery, phases, obsolete, bookmarks


class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch

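# An illustrative sketch (not part of this file): per the docstring above, a
# caller builds one fresh pushoperation per push and discards it afterward.
# `repo` and `remote` stand for an already-opened local repository and peer.
#
#     pushop = pushoperation(repo, remote, force=False, revs=None,
#                            newbranch=False)
#     # ... perform the push, then let pushop go out of scope
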
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    unfi = pushop.repo.unfiltered()
    def localphasemove(nodes, phase=phases.public):
        """move <nodes> to <phase> in the local source repo"""
        if locallock is not None:
            phases.advanceboundary(pushop.repo, phase, nodes)
        else:
            # repo is not locked, do not change any phases!
            # Informs the user that phases should have been moved when
            # applicable.
            actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
            phasestr = phases.phasenames[phase]
            if actualmoves:
                pushop.ui.status(_('cannot lock source repo, skipping '
                                   'local %s phase update\n') % phasestr)
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
    except IOError, err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop.force, pushop.revs)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            # discovery
            fci = discovery.findcommonincoming
            commoninc = fci(unfi, pushop.remote, force=pushop.force)
            common, inc, remoteheads = commoninc
            fco = discovery.findcommonoutgoing
            outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                           commoninc=commoninc, force=pushop.force)


            if not outgoing.missing:
                # nothing to push
                scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                ret = None
            else:
                # something to push
                if not pushop.force:
                    # if repo.obsstore == False --> no obsolete
                    # then, save the iteration
                    if unfi.obsstore:
                        # these messages are here for 80 char limit reason
                        mso = _("push includes obsolete changeset: %s!")
                        mst = "push includes %s changeset: %s!"
                        # plain versions for i18n tool to detect them
                        _("push includes unstable changeset: %s!")
                        _("push includes bumped changeset: %s!")
                        _("push includes divergent changeset: %s!")
                        # If we are to push, and there is at least one
                        # obsolete or unstable changeset in missing, then at
                        # least one of the missing heads will be obsolete or
                        # unstable. So checking heads only is ok
                        for node in outgoing.missingheads:
                            ctx = unfi[node]
                            if ctx.obsolete():
                                raise util.Abort(mso % ctx)
                            elif ctx.troubled():
                                raise util.Abort(_(mst)
                                                 % (ctx.troubles()[0],
                                                    ctx))
                newbm = pushop.ui.configlist('bookmarks', 'pushing')
                discovery.checkheads(unfi, pushop.remote, outgoing,
                                     remoteheads, pushop.newbranch,
                                     bool(inc), newbm)

                # TODO: get bundlecaps from remote
                bundlecaps = None
                # create a changegroup from local
                if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
                    # push everything,
                    # use the fast path, no race possible on push
                    bundler = changegroup.bundle10(pushop.repo, bundlecaps)
                    cg = pushop.repo._changegroupsubset(outgoing,
                                                        bundler,
                                                        'push',
                                                        fastpath=True)
                else:
                    cg = pushop.repo.getlocalbundle('push', outgoing,
                                                    bundlecaps)

                # apply changegroup to remote
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if pushop.force:
                        remoteheads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = pushop.remote.unbundle(cg, remoteheads, 'push')
                else:
                    # we return an integer indicating remote head count
                    # change
                    ret = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())

            if ret:
                # push succeeded, synchronize target of the push
                cheads = outgoing.missingheads
            elif pushop.revs is None:
                # All-out push failed. synchronize all common
                cheads = outgoing.commonheads
            else:
                # I want cheads = heads(::missingheads and ::commonheads)
                # (missingheads is revs with secret changeset filtered out)
                #
                # This can be expressed as:
                #     cheads = ( (missingheads and ::commonheads)
                #              + (commonheads and ::missingheads))"
                #              )
                #
                # while trying to push we already computed the following:
                #     common = (::commonheads)
                #     missing = ((commonheads::missingheads) - commonheads)
                #
                # We can pick:
                # * missingheads part of common (::commonheads)
                common = set(outgoing.common)
                nm = pushop.repo.changelog.nodemap
                cheads = [node for node in pushop.revs if nm[node] in common]
                # and
                # * commonheads parents on missing
                revset = unfi.set('%ln and parents(roots(%ln))',
                                  outgoing.commonheads,
                                  outgoing.missing)
                cheads.extend(c.node() for c in revset)
            # even when we don't push, exchanging phase data is useful
            remotephases = pushop.remote.listkeys('phases')
            if (pushop.ui.configbool('ui', '_usedassubrepo', False)
                and remotephases    # server supports phases
                and ret is None # nothing was pushed
                and remotephases.get('publishing', False)):
                # When:
                # - this is a subrepo push
                # - and remote supports phases
                # - and no changeset was pushed
                # - and remote is publishing
                # We may be in issue 3871 case!
                # We drop the possible phase synchronisation done by
                # courtesy to publish changesets possibly locally draft
                # on the remote.
                remotephases = {'publishing': 'True'}
            if not remotephases: # old server or public only repo
                localphasemove(cheads)
                # don't push any phase data as there is nothing to push
            else:
                ana = phases.analyzeremotephases(pushop.repo, cheads,
                                                 remotephases)
                pheads, droots = ana
                ### Apply remote phase on local
                if remotephases.get('publishing', False):
                    localphasemove(cheads)
                else: # publish = False
                    localphasemove(pheads)
                    localphasemove(cheads, phases.draft)
                ### Apply local phase on remote

                # Get the list of all revs draft on remote by public here.
                # XXX Beware that the revset breaks if droots is not strictly
                # XXX roots; we may want to ensure it is, but that is costly
                outdated = unfi.set('heads((%ln::%ln) and public())',
                                    droots, cheads)
                for newremotehead in outdated:
                    r = pushop.remote.pushkey('phases',
                                              newremotehead.hex(),
                                              str(phases.draft),
                                              str(phases.public))
                    if not r:
                        pushop.ui.warn(_('updating %s to public failed!\n')
                                       % newremotehead)
            pushop.ui.debug('try to push obsolete markers to remote\n')
-            obsolete.syncpush(pushop.repo, pushop.remote)
+            _pushobsolete(pushop.repo, pushop.remote)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return ret

+def _pushobsolete(repo, remote):
+    """utility function to push obsolete markers to a remote
+
+    Exists mostly to allow overriding for experimentation purposes"""
+    if (obsolete._enabled and repo.obsstore and
+        'obsolete' in remote.listkeys('namespaces')):
+        rslts = []
+        remotedata = repo.listkeys('obsolete')
+        for key in sorted(remotedata, reverse=True):
+            # reverse sort to ensure we end with dump0
+            data = remotedata[key]
+            rslts.append(remote.pushkey('obsolete', key, '', data))
+        if [r for r in rslts if not r]:
+            msg = _('failed to push some obsolete markers!\n')
+            repo.ui.warn(msg)
+
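# An illustrative sketch (not part of this commit), assuming a repository
# with markers: each value _pushobsolete forwards is a base85 blob that
# decodes to a version header plus binary markers (see listmarkers() and
# _readmarkers() in obsolete.py below).
#
#     from mercurial import base85, obsolete
#     encoded = repo.listkeys('obsolete').get('dump0')
#     if encoded is not None:
#         markers = list(obsolete._readmarkers(base85.b85decode(encoded)))
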
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
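
For reference, a minimal caller-side sketch (not part of this commit) of how
push()'s documented return values might be interpreted; `repo` and `remote`
are assumed to be an already-opened local repository and peer:

    from mercurial import exchange

    ret = exchange.push(repo, remote, force=False, revs=None)
    if ret is None:
        repo.ui.status('nothing to push\n')
    elif ret == 0:
        repo.ui.warn('push failed (HTTP error)\n')
    # ret == 1: pushed with remote head count unchanged, or there were
    # outgoing changesets but the push was refused; any other value is
    # whatever addchangegroup() returned.
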
--- a/mercurial/obsolete.py
+++ b/mercurial/obsolete.py
@@ -1,880 +1,864 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete markers handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
building new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as a
precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as a
successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by a changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the
  "divergence" case. If two independent operations rewrite the same
  changeset A into A' and A'', we have an error case: divergent rewriting.
  We can detect it because two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", could be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changeset identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third-party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

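# A self-contained illustration (not part of this file) of the on-disk
# format described in the module docstring: pack one marker with the
# '>BIB20s' fixed part plus one 20-byte successor, then unpack it the way
# _readmarkers() below does. The node values are fake placeholders.
_demoprec = 'a' * 20                  # obsoleted changeset identifier
_demosucs = ('b' * 20,)               # successor identifiers
_demometa = 'date:0 0\0user:alice'    # nul-separated key:value metadata
_demoraw = _pack(_fmfixed + _fmnode * len(_demosucs),
                 len(_demosucs), len(_demometa), 0, _demoprec,
                 *_demosucs) + _demometa
assert _unpack(_fmfixed, _demoraw[:_fmfsize]) == (1, len(_demometa), 0,
                                                  _demoprec)
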
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o  A' (bumped)
#     |`:
#     | o  A
#     |/
#     o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o   Ad
#     |`:
#     | x  A'
#     |'|
#     o |  A
#     |/
#     o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1

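# Illustration only (not part of this file): the flag lives in the third
# field of a marker tuple, which is how allsuccessors() further below can
# honour its `ignoreflags` argument and skip bumpedfix markers.
#
#     pre, sucs, flags, metadata = mark
#     if flags & bumpedfix:
#         pass  # this marker only fixes a "bumped" situation
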
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # split on the first ':' only; values may contain ':'
            key, value = l.split(':', 1)
            d[key] = value
    return d

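# A quick round-trip illustration (not part of this file) through the
# nul-separated 'key:value' metadata encoding defined just above.
_demomap = {'date': '0 0', 'user': 'alice'}
assert encodemeta(_demomap) == 'date:0 0\0user:alice'
assert decodemeta(encodemeta(_demomap)) == _demomap
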
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related computations
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

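# Illustration only (not part of this file), assuming an opened local repo
# with _enabled set to True: creating a marker follows the same
# lock/transaction discipline pushmarker() uses below. `old` and `new` are
# assumed to be 20-byte changeset node ids.
#
#     lock = repo.lock()
#     try:
#         tr = repo.transaction('add obsolescence marker')
#         try:
#             repo.obsstore.create(tr, old, (new,),
#                                  metadata={'user': 'alice'})
#             tr.close()
#         finally:
#             tr.release()
#     finally:
#         lock.release()
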
def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %s') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

-def syncpush(repo, remote):
-    """utility function to push obsolete markers to a remote
-
-    Exists mostly to allow overriding for experimentation purposes"""
-    if (_enabled and repo.obsstore and
-        'obsolete' in remote.listkeys('namespaces')):
-        rslts = []
-        remotedata = repo.listkeys('obsolete')
-        for key in sorted(remotedata, reverse=True):
-            # reverse sort to ensure we end with dump0
-            data = remotedata[key]
-            rslts.append(remote.pushkey('obsolete', key, '', data))
-        if [r for r in rslts if not r]:
-            msg = _('failed to push some obsolete markers!\n')
-            repo.ui.warn(msg)
-
def syncpull(repo, remote, gettransaction):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    tr = None
    if _enabled:
        repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
    return tr

425 """all obsolete markers known in a repository"""
409 """all obsolete markers known in a repository"""
426 for markerdata in repo.obsstore:
410 for markerdata in repo.obsstore:
427 yield marker(repo, markerdata)
411 yield marker(repo, markerdata)
428
412
429 def precursormarkers(ctx):
413 def precursormarkers(ctx):
430 """obsolete marker marking this changeset as a successors"""
414 """obsolete marker marking this changeset as a successors"""
431 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
415 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
432 yield marker(ctx._repo, data)
416 yield marker(ctx._repo, data)
433
417
434 def successormarkers(ctx):
418 def successormarkers(ctx):
435 """obsolete marker making this changeset obsolete"""
419 """obsolete marker making this changeset obsolete"""
436 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
420 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
437 yield marker(ctx._repo, data)
421 yield marker(ctx._repo, data)
438
422
439 def allsuccessors(obsstore, nodes, ignoreflags=0):
423 def allsuccessors(obsstore, nodes, ignoreflags=0):
440 """Yield node for every successor of <nodes>.
424 """Yield node for every successor of <nodes>.
441
425
442 Some successors may be unknown locally.
426 Some successors may be unknown locally.
443
427
444 This is a linear yield unsuited to detecting split changesets. It includes
428 This is a linear yield unsuited to detecting split changesets. It includes
445 initial nodes too."""
429 initial nodes too."""
446 remaining = set(nodes)
430 remaining = set(nodes)
447 seen = set(remaining)
431 seen = set(remaining)
448 while remaining:
432 while remaining:
449 current = remaining.pop()
433 current = remaining.pop()
450 yield current
434 yield current
451 for mark in obsstore.successors.get(current, ()):
435 for mark in obsstore.successors.get(current, ()):
452 # ignore marker flagged with specified flag
436 # ignore marker flagged with specified flag
453 if mark[2] & ignoreflags:
437 if mark[2] & ignoreflags:
454 continue
438 continue
455 for suc in mark[1]:
439 for suc in mark[1]:
456 if suc not in seen:
440 if suc not in seen:
457 seen.add(suc)
441 seen.add(suc)
458 remaining.add(suc)
442 remaining.add(suc)
459
443
460 def allprecursors(obsstore, nodes, ignoreflags=0):
444 def allprecursors(obsstore, nodes, ignoreflags=0):
461 """Yield node for every precursors of <nodes>.
445 """Yield node for every precursors of <nodes>.
462
446
463 Some precursors may be unknown locally.
447 Some precursors may be unknown locally.
464
448
465 This is a linear yield unsuited to detecting folded changesets. It includes
449 This is a linear yield unsuited to detecting folded changesets. It includes
466 initial nodes too."""
450 initial nodes too."""
467
451
468 remaining = set(nodes)
452 remaining = set(nodes)
469 seen = set(remaining)
453 seen = set(remaining)
470 while remaining:
454 while remaining:
471 current = remaining.pop()
455 current = remaining.pop()
472 yield current
456 yield current
473 for mark in obsstore.precursors.get(current, ()):
457 for mark in obsstore.precursors.get(current, ()):
474 # ignore marker flagged with specified flag
458 # ignore marker flagged with specified flag
475 if mark[2] & ignoreflags:
459 if mark[2] & ignoreflags:
476 continue
460 continue
477 suc = mark[0]
461 suc = mark[0]
478 if suc not in seen:
462 if suc not in seen:
479 seen.add(suc)
463 seen.add(suc)
480 remaining.add(suc)
464 remaining.add(suc)
481
465
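# Illustration only (not part of this file): walk every successor known for
# a changeset with the helpers above, skipping markers that merely carry the
# bumpedfix flag.
#
#     for n in allsuccessors(repo.obsstore, [ctx.node()],
#                            ignoreflags=bumpedfix):
#         ...
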
482 def foreground(repo, nodes):
466 def foreground(repo, nodes):
483 """return all nodes in the "foreground" of other node
467 """return all nodes in the "foreground" of other node
484
468
485 The foreground of a revision is anything reachable using parent -> children
469 The foreground of a revision is anything reachable using parent -> children
486 or precursor -> successor relation. It is very similar to "descendant" but
470 or precursor -> successor relation. It is very similar to "descendant" but
487 augmented with obsolescence information.
471 augmented with obsolescence information.
488
472
489 Beware that possible obsolescence cycle may result if complex situation.
473 Beware that possible obsolescence cycle may result if complex situation.
490 """
474 """
491 repo = repo.unfiltered()
475 repo = repo.unfiltered()
492 foreground = set(repo.set('%ln::', nodes))
476 foreground = set(repo.set('%ln::', nodes))
493 if repo.obsstore:
477 if repo.obsstore:
494 # We only need this complicated logic if there is obsolescence
478 # We only need this complicated logic if there is obsolescence
495 # XXX will probably deserve an optimised revset.
479 # XXX will probably deserve an optimised revset.
496 nm = repo.changelog.nodemap
480 nm = repo.changelog.nodemap
497 plen = -1
481 plen = -1
498 # compute the whole set of successors or descendants
482 # compute the whole set of successors or descendants
499 while len(foreground) != plen:
483 while len(foreground) != plen:
500 plen = len(foreground)
484 plen = len(foreground)
501 succs = set(c.node() for c in foreground)
485 succs = set(c.node() for c in foreground)
502 mutable = [c.node() for c in foreground if c.mutable()]
486 mutable = [c.node() for c in foreground if c.mutable()]
503 succs.update(allsuccessors(repo.obsstore, mutable))
487 succs.update(allsuccessors(repo.obsstore, mutable))
504 known = (n for n in succs if n in nm)
488 known = (n for n in succs if n in nm)
505 foreground = set(repo.set('%ln::', known))
489 foreground = set(repo.set('%ln::', known))
506 return set(c.node() for c in foreground)
490 return set(c.node() for c in foreground)
507
491
508
492
509 def successorssets(repo, initialnode, cache=None):
493 def successorssets(repo, initialnode, cache=None):
510 """Return all set of successors of initial nodes
494 """Return all set of successors of initial nodes
511
495
512 The successors set of a changeset A are a group of revisions that succeed
496 The successors set of a changeset A are a group of revisions that succeed
513 A. It succeeds A as a consistent whole, each revision being only a partial
497 A. It succeeds A as a consistent whole, each revision being only a partial
514 replacement. The successors set contains non-obsolete changesets only.
498 replacement. The successors set contains non-obsolete changesets only.
515
499
516 This function returns the full list of successor sets which is why it
500 This function returns the full list of successor sets which is why it
517 returns a list of tuples and not just a single tuple. Each tuple is a valid
501 returns a list of tuples and not just a single tuple. Each tuple is a valid
518 successors set. Not that (A,) may be a valid successors set for changeset A
502 successors set. Not that (A,) may be a valid successors set for changeset A
519 (see below).
503 (see below).
520
504
521 In most cases, a changeset A will have a single element (e.g. the changeset
505 In most cases, a changeset A will have a single element (e.g. the changeset
522 A is replaced by A') in its successors set. Though, it is also common for a
506 A is replaced by A') in its successors set. Though, it is also common for a
523 changeset A to have no elements in its successor set (e.g. the changeset
507 changeset A to have no elements in its successor set (e.g. the changeset
524 has been pruned). Therefore, the returned list of successors sets will be
508 has been pruned). Therefore, the returned list of successors sets will be
525 [(A',)] or [], respectively.
509 [(A',)] or [], respectively.
526
510
527 When a changeset A is split into A' and B', however, it will result in a
511 When a changeset A is split into A' and B', however, it will result in a
528 successors set containing more than a single element, i.e. [(A',B')].
512 successors set containing more than a single element, i.e. [(A',B')].
529 Divergent changesets will result in multiple successors sets, i.e. [(A',),
513 Divergent changesets will result in multiple successors sets, i.e. [(A',),
530 (A'')].
514 (A'')].
531
515
532 If a changeset A is not obsolete, then it will conceptually have no
516 If a changeset A is not obsolete, then it will conceptually have no
533 successors set. To distinguish this from a pruned changeset, the successor
517 successors set. To distinguish this from a pruned changeset, the successor
534 set will only contain itself, i.e. [(A,)].
518 set will only contain itself, i.e. [(A,)].
535
519
536 Finally, successors unknown locally are considered to be pruned (obsoleted
520 Finally, successors unknown locally are considered to be pruned (obsoleted
537 without any successors).
521 without any successors).
538
522
539 The optional `cache` parameter is a dictionary that may contain precomputed
523 The optional `cache` parameter is a dictionary that may contain precomputed
540 successors sets. It is meant to reuse the computation of a previous call to
524 successors sets. It is meant to reuse the computation of a previous call to
541 `successorssets` when multiple calls are made at the same time. The cache
525 `successorssets` when multiple calls are made at the same time. The cache
542 dictionary is updated in place. The caller is responsible for its live
526 dictionary is updated in place. The caller is responsible for its live
543 spawn. Code that makes multiple calls to `successorssets` *must* use this
527 spawn. Code that makes multiple calls to `successorssets` *must* use this
544 cache mechanism or suffer terrible performances.
528 cache mechanism or suffer terrible performances.
545
529
546 """
530 """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # any element added to "toproceed" must be added here as well
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, so we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node whose
    # successors sets we search for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check whether
    # a node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of CURRENT's direct successors:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid final successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset successors sets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

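# Usage sketch (an added example, not part of the original module): code
# that needs successors sets for many nodes should share one cache dict
# across calls, as the docstring above requires.
def _allsuccessorssets(repo, nodes):
    """return a {node: successors sets} map computed with a shared cache"""
    cache = {}
    return dict((n, successorssets(repo, n, cache)) for n in nodes)
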
def _knownrevs(repo, nodes):
    """yield revision numbers of the known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

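# Example (an added illustration; `remoteheads` stands for a hypothetical
# list of nodes, some of which may be unknown locally):
#
#     revs = sorted(_knownrevs(repo, remoteheads))
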
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

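# Registration sketch (an added example with a hypothetical set name; kept
# as a comment so it does not actually register anything):
#
#     @cachefor('frozen')
#     def _computefrozenset(repo):
#         """the set of revisions in some hypothetical new set"""
#         return set()
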
def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

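# Usage sketch (an added example): the first access computes and caches
# the set; later membership tests are cheap until the caches are cleared.
#
#     if rev in getrevs(repo, 'obsolete'):
#         ...  # `rev` is obsolete
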
# To be simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

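# Usage note (added): any code path matching the events listed above should
# call clearobscaches(repo) so the derived sets are recomputed on the next
# getrevs() access.
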
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for node in repo.obsstore.successors:
        rev = getrev(node)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

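# Equivalent revset (an added note; slower in practice, which is why the
# function above computes the set by hand):
#
#     repo.revs('(obsolete()::) - obsolete()')
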
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility functions (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and each `new` are changectx objects.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata argument.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for prec, sucs in relations:
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
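
# Usage sketch (an added example; `old`, `new` and `gone` stand for
# hypothetical changectx objects): record that `old` was rewritten into
# `new` and that `gone` was pruned (no successors):
#
#     createmarkers(repo, [(old, (new,)), (gone, ())])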