##// END OF EJS Templates
phases: use revision number in new_heads...
marmoute -
r52473:b70628a9 default
parent child Browse files
Show More
@@ -1,619 +1,624 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import functools
9 import functools
10
10
11 from .i18n import _
11 from .i18n import _
12 from .node import (
12 from .node import (
13 hex,
13 hex,
14 short,
14 short,
15 )
15 )
16
16
17 from . import (
17 from . import (
18 bookmarks,
18 bookmarks,
19 branchmap,
19 branchmap,
20 error,
20 error,
21 obsolete,
21 obsolete,
22 phases,
22 phases,
23 pycompat,
23 pycompat,
24 scmutil,
24 scmutil,
25 setdiscovery,
25 setdiscovery,
26 treediscovery,
26 treediscovery,
27 util,
27 util,
28 )
28 )
29
29
30
30
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof" if not None, restrict the discovery to a subset defined by
    these nodes. Changeset outside of this set won't be considered (but may
    still appear in "common").

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """
    # Old servers without getbundle fall back to the tree-walking protocol.
    if not remote.capable(b'getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        # no nodemap until it is filtered
        knownnode = repo.changelog.hasnode
        if all(knownnode(h) for h in heads):
            # Every requested head is already local: nothing is incoming.
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui,
        repo,
        remote,
        abortwhenunrelated=not force,
        ancestorsof=ancestorsof,
    )
    if heads and not anyinc:
        # server could be lying on the advertised heads
        has_node = repo.changelog.hasnode
        anyinc = any(not has_node(n) for n in heads)
    return (list(common), anyinc, heads or list(srvheads))
74
74
75
75
class outgoing:
    """Represents the result of a findcommonoutgoing() call.

    Members:

    ancestorsof is a list of the nodes whose ancestors are included in the
    outgoing operation.

    missing is a list of those ancestors of ancestorsof that are present in
    local but not in remote.

    common is a set containing revs common between the local and the remote
    repository (at least all of those that are ancestors of ancestorsof).

    commonheads is the list of heads of common.

    excluded is the list of missing changeset that shouldn't be sent
    remotely.

    Some members are computed on demand from the heads, unless provided upfront
    by discovery."""

    def __init__(
        self, repo, commonheads=None, ancestorsof=None, missingroots=None
    ):
        # at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if ancestorsof is None:
            ancestorsof = cl.heads()
        if missingroots:
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
            included = set(csets)
            # Collect every non-null parent of the missing changesets; the
            # ones that fall outside the missing set are the common heads.
            discbases = []
            for node in csets:
                discbases.extend(
                    [p for p in cl.parents(node) if p != repo.nullid]
                )
            ancestorsof = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [repo.nullid]
        self.commonheads = commonheads
        self.ancestorsof = ancestorsof
        self._revlog = cl
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # Lazily derive both the common and missing sets in one pass.
        both = self._revlog.findcommonmissing(
            self.commonheads, self.ancestorsof
        )
        self._common, self._missing = both

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
143
143
144
144
def findcommonoutgoing(
    repo, other, onlyheads=None, force=False, commoninc=None, portable=False
):
    """Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and ancestorsof,
    to make bundles created from the instance more portable."""
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(
            repo, other, force=force, ancestorsof=onlyheads
        )
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = phases.hassecret(repo) or repo.obsstore
    if not mayexclude:
        # Nothing secret or obsolete: every local head can be shared.
        og.ancestorsof = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.ancestorsof = repo.filtered(b"served").heads()
        og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        og._common, allmissing = repo.changelog.findcommonmissing(
            og.commonheads, onlyheads
        )
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # nothing was filtered out, the requested heads still stand
            ancestorsof = onlyheads
        else:  # update missing heads
            to_rev = repo.changelog.index.rev
            to_node = repo.changelog.node
            excluded_revs = [to_rev(node) for node in excluded]
            onlyheads_revs = [to_rev(node) for node in onlyheads]
            new_heads = phases.new_heads(repo, onlyheads_revs, excluded_revs)
            ancestorsof = [to_node(r) for r in new_heads]
        og.ancestorsof = ancestorsof
    if portable:
        # recompute common and ancestorsof as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = {cl.rev(n) for n in og._missing}
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]

    return og
207
212
208
213
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Create set of branches involved in the push.
    branches = set()
    for node in outgoing.missing:
        ctx = repo[node]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    with remote.commandexecutor() as e:
        remotemap = e.callcommand(b'branchmap', {}).result()

    # do not use nodemap until it is filtered
    knownnode = cl.hasnode
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in remotemap.items():
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        known = []
        unsynced = []
        for head in heads:
            if knownnode(head):
                known.append(head)
            else:
                unsynced.append(head)
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache(
        repo,
        (
            (branch, heads[1])
            for branch, heads in headssum.items()
            if heads[0] is not None
        ),
    )
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.items():
        headssum[branch][1][:] = newheads
    # Normalize ordering and append the (still empty) discarded-heads slot.
    for branch, items in headssum.items():
        for l in items:
            if l is not None:
                l.sort()
        headssum[branch] = items + ([],)

    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = {torev(h) for h in outgoing.ancestorsof}
        futureheads |= {torev(h) for h in outgoing.commonheads}
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(pycompat.iteritems(headssum)):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (
                remoteheads,
                sorted(result[0]),
                unsyncedheads,
                sorted(result[1]),
            )
    return headssum
293
298
294
299
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    # no nodemap until it is filtered
    knownnode = repo.changelog.hasnode
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new head are very simple to compute.
    candidates = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(ctx.node() for ctx in candidates)
    # set some unsynced head to issue the "unsynced changes" warning
    unsynced = [None] if inc else []
    return {None: (oldheads, newheads, unsynced, [])}
316
321
317
322
def _nowarnheads(pushop):
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks

    with remote.commandexecutor() as e:
        remotebookmarks = e.callcommand(
            b'listkeys',
            {
                b'namespace': b'bookmarks',
            },
        ).result()

    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [
        localbookmarks.expandname(b)
        for b in pushop.ui.configlist(b'bookmarks', b'pushing')
    ]

    for name in localbookmarks:
        remotenode = remotebookmarks.get(name)
        if remotenode and remotenode in repo:
            # Bookmark exists on both sides; its local target is safe to push
            # only if it is a valid fast-forward of the remote target.
            lctx = repo[localbookmarks[name]]
            rctx = repo[remotenode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        elif name in newbookmarks and name not in remotebookmarks:
            # Bookmark is being created remotely as part of this push.
            bookmarkedheads.add(localbookmarks[name])

    return bookmarkedheads
351
356
352
357
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise StateError error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [repo.nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable(b'branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    newbranches = [
        branch for branch, heads in headssum.items() if heads[0] is None
    ]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = b', '.join(sorted(newbranches))
        # Calculate how many of the new branches are closed branches
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = closedbranches & set(newbranches)
        if closedbranches:
            errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
                branchnames,
                len(closedbranches),
            )
        else:
            errmsg = _(b"push creates new remote branches: %s") % branchnames
        hint = _(b"use 'hg push --new-branch' to create new remote branches")
        raise error.StateError(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(pycompat.iteritems(headssum)):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally\n")
                )
            elif branch is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally: %s\n")
                    % heads
                )
            else:
                repo.ui.status(
                    _(
                        b"remote has heads on branch '%s' that are "
                        b"not known locally: %s\n"
                    )
                    % (branch, heads)
                )
        if remoteheads is None:
            # The branch is new on the remote: more than one head is an error.
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (
                        _(b"push creates new branch '%s' with multiple heads")
                        % branch
                    )
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in (b'default', None):
                    errormsg = _(
                        b"push creates new remote head %s on branch '%s'"
                    ) % (
                        short(dhs[0]),
                        branch,
                    )
                elif repo[dhs[0]].bookmarks():
                    errormsg = _(
                        b"push creates new remote head %s "
                        b"with bookmark '%s'"
                    ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _(b"push creates new remote head %s") % short(
                        dhs[0]
                    )
                if unsyncedheads:
                    hint = _(
                        b"pull and merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
                else:
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
            if branch is None:
                repo.ui.note(_(b"new remote heads:\n"))
            else:
                repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note(b" %s\n" % short(h))
    if errormsg:
        raise error.StateError(errormsg, hint=hint)
498
503
499
504
500 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
505 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
501 """post process the list of new heads with obsolescence information
506 """post process the list of new heads with obsolescence information
502
507
503 Exists as a sub-function to contain the complexity and allow extensions to
508 Exists as a sub-function to contain the complexity and allow extensions to
504 experiment with smarter logic.
509 experiment with smarter logic.
505
510
506 Returns (newheads, discarded_heads) tuple
511 Returns (newheads, discarded_heads) tuple
507 """
512 """
508 # known issue
513 # known issue
509 #
514 #
510 # * We "silently" skip processing on all changeset unknown locally
515 # * We "silently" skip processing on all changeset unknown locally
511 #
516 #
512 # * if <nh> is public on the remote, it won't be affected by obsolete
517 # * if <nh> is public on the remote, it won't be affected by obsolete
513 # marker and a new is created
518 # marker and a new is created
514
519
515 # define various utilities and containers
520 # define various utilities and containers
516 repo = pushop.repo
521 repo = pushop.repo
517 unfi = repo.unfiltered()
522 unfi = repo.unfiltered()
518 torev = unfi.changelog.index.get_rev
523 torev = unfi.changelog.index.get_rev
519 public = phases.public
524 public = phases.public
520 getphase = unfi._phasecache.phase
525 getphase = unfi._phasecache.phase
521 ispublic = lambda r: getphase(unfi, r) == public
526 ispublic = lambda r: getphase(unfi, r) == public
522 ispushed = lambda n: torev(n) in futurecommon
527 ispushed = lambda n: torev(n) in futurecommon
523 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
528 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
524 successorsmarkers = unfi.obsstore.successors
529 successorsmarkers = unfi.obsstore.successors
525 newhs = set() # final set of new heads
530 newhs = set() # final set of new heads
526 discarded = set() # new head of fully replaced branch
531 discarded = set() # new head of fully replaced branch
527
532
528 localcandidate = set() # candidate heads known locally
533 localcandidate = set() # candidate heads known locally
529 unknownheads = set() # candidate heads unknown locally
534 unknownheads = set() # candidate heads unknown locally
530 for h in candidate_newhs:
535 for h in candidate_newhs:
531 if h in unfi:
536 if h in unfi:
532 localcandidate.add(h)
537 localcandidate.add(h)
533 else:
538 else:
534 if successorsmarkers.get(h) is not None:
539 if successorsmarkers.get(h) is not None:
535 msg = (
540 msg = (
536 b'checkheads: remote head unknown locally has'
541 b'checkheads: remote head unknown locally has'
537 b' local marker: %s\n'
542 b' local marker: %s\n'
538 )
543 )
539 repo.ui.debug(msg % hex(h))
544 repo.ui.debug(msg % hex(h))
540 unknownheads.add(h)
545 unknownheads.add(h)
541
546
542 # fast path the simple case
547 # fast path the simple case
543 if len(localcandidate) == 1:
548 if len(localcandidate) == 1:
544 return unknownheads | set(candidate_newhs), set()
549 return unknownheads | set(candidate_newhs), set()
545
550
546 obsrevs = obsolete.getrevs(unfi, b'obsolete')
551 obsrevs = obsolete.getrevs(unfi, b'obsolete')
547 futurenonobsolete = frozenset(futurecommon) - obsrevs
552 futurenonobsolete = frozenset(futurecommon) - obsrevs
548
553
549 # actually process branch replacement
554 # actually process branch replacement
550 while localcandidate:
555 while localcandidate:
551 nh = localcandidate.pop()
556 nh = localcandidate.pop()
552 r = torev(nh)
557 r = torev(nh)
553 current_branch = unfi[nh].branch()
558 current_branch = unfi[nh].branch()
554 # run this check early to skip the evaluation of the whole branch
559 # run this check early to skip the evaluation of the whole branch
555 if ispublic(r) or r not in obsrevs:
560 if ispublic(r) or r not in obsrevs:
556 newhs.add(nh)
561 newhs.add(nh)
557 continue
562 continue
558
563
559 # Get all revs/nodes on the branch exclusive to this head
564 # Get all revs/nodes on the branch exclusive to this head
560 # (already filtered heads are "ignored"))
565 # (already filtered heads are "ignored"))
561 branchrevs = unfi.revs(
566 branchrevs = unfi.revs(
562 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
567 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
563 )
568 )
564
569
565 branchnodes = []
570 branchnodes = []
566 for r in branchrevs:
571 for r in branchrevs:
567 c = unfi[r]
572 c = unfi[r]
568 if c.branch() == current_branch:
573 if c.branch() == current_branch:
569 branchnodes.append(c.node())
574 branchnodes.append(c.node())
570
575
571 # The branch won't be hidden on the remote if
576 # The branch won't be hidden on the remote if
572 # * any part of it is public,
577 # * any part of it is public,
573 # * any part of it is considered part of the result by previous logic,
578 # * any part of it is considered part of the result by previous logic,
574 # * if we have no markers to push to obsolete it.
579 # * if we have no markers to push to obsolete it.
575 if (
580 if (
576 any(ispublic(r) for r in branchrevs)
581 any(ispublic(r) for r in branchrevs)
577 or any(torev(n) in futurenonobsolete for n in branchnodes)
582 or any(torev(n) in futurenonobsolete for n in branchnodes)
578 or any(not hasoutmarker(n) for n in branchnodes)
583 or any(not hasoutmarker(n) for n in branchnodes)
579 ):
584 ):
580 newhs.add(nh)
585 newhs.add(nh)
581 else:
586 else:
582 # note: there is a corner case if there is a merge in the branch.
587 # note: there is a corner case if there is a merge in the branch.
583 # we might end up with -more- heads. However, these heads are not
588 # we might end up with -more- heads. However, these heads are not
584 # "added" by the push, but more by the "removal" on the remote so I
589 # "added" by the push, but more by the "removal" on the remote so I
585 # think is a okay to ignore them,
590 # think is a okay to ignore them,
586 discarded.add(nh)
591 discarded.add(nh)
587 newhs |= unknownheads
592 newhs |= unknownheads
588 return newhs, discarded
593 return newhs, discarded
589
594
590
595
591 def pushingmarkerfor(obsstore, ispushed, node):
596 def pushingmarkerfor(obsstore, ispushed, node):
592 """true if some markers are to be pushed for node
597 """true if some markers are to be pushed for node
593
598
594 We cannot just look in to the pushed obsmarkers from the pushop because
599 We cannot just look in to the pushed obsmarkers from the pushop because
595 discovery might have filtered relevant markers. In addition listing all
600 discovery might have filtered relevant markers. In addition listing all
596 markers relevant to all changesets in the pushed set would be too expensive
601 markers relevant to all changesets in the pushed set would be too expensive
597 (O(len(repo)))
602 (O(len(repo)))
598
603
599 (note: There are cache opportunity in this function. but it would requires
604 (note: There are cache opportunity in this function. but it would requires
600 a two dimensional stack.)
605 a two dimensional stack.)
601 """
606 """
602 successorsmarkers = obsstore.successors
607 successorsmarkers = obsstore.successors
603 stack = [node]
608 stack = [node]
604 seen = set(stack)
609 seen = set(stack)
605 while stack:
610 while stack:
606 current = stack.pop()
611 current = stack.pop()
607 if ispushed(current):
612 if ispushed(current):
608 return True
613 return True
609 markers = successorsmarkers.get(current, ())
614 markers = successorsmarkers.get(current, ())
610 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
615 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
611 for m in markers:
616 for m in markers:
612 nexts = m[1] # successors
617 nexts = m[1] # successors
613 if not nexts: # this is a prune marker
618 if not nexts: # this is a prune marker
614 nexts = m[5] or () # parents
619 nexts = m[5] or () # parents
615 for n in nexts:
620 for n in nexts:
616 if n not in seen:
621 if n not in seen:
617 seen.add(n)
622 seen.add(n)
618 stack.append(n)
623 stack.append(n)
619 return False
624 return False
@@ -1,1223 +1,1230 b''
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described
21 manipulated and communicated. The details of each phase is described
22 below, here we describe the properties they have in common.
22 below, here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset can not be in a lower phase than its parents.
30 child changeset can not be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of fact/rules define the exchange of phase:
49 A small list of fact/rules define the exchange of phase:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old client behave as a publishing server with draft only content
97 Note: old client behave as a publishing server with draft only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """
102
102
103
103
104 import heapq
104 import heapq
105 import struct
105 import struct
106 import typing
106 import typing
107 import weakref
107 import weakref
108
108
109 from typing import (
109 from typing import (
110 Any,
110 Any,
111 Callable,
111 Callable,
112 Collection,
112 Dict,
113 Dict,
113 Iterable,
114 Iterable,
114 List,
115 List,
115 Optional,
116 Optional,
116 Set,
117 Set,
117 Tuple,
118 Tuple,
118 )
119 )
119
120
120 from .i18n import _
121 from .i18n import _
121 from .node import (
122 from .node import (
122 bin,
123 bin,
123 hex,
124 hex,
124 nullrev,
125 nullrev,
125 short,
126 short,
126 wdirrev,
127 wdirrev,
127 )
128 )
128 from . import (
129 from . import (
129 error,
130 error,
130 pycompat,
131 requirements,
131 requirements,
132 smartset,
132 smartset,
133 txnutil,
133 txnutil,
134 util,
134 util,
135 )
135 )
136
136
137 Phaseroots = Dict[int, Set[int]]
137 Phaseroots = Dict[int, Set[int]]
138 PhaseSets = Dict[int, Set[int]]
138 PhaseSets = Dict[int, Set[int]]
139
139
140 if typing.TYPE_CHECKING:
140 if typing.TYPE_CHECKING:
141 from . import (
141 from . import (
142 localrepo,
142 localrepo,
143 ui as uimod,
143 ui as uimod,
144 )
144 )
145
145
146 # keeps pyflakes happy
146 # keeps pyflakes happy
147 assert [uimod]
147 assert [uimod]
148
148
149 Phasedefaults = List[
149 Phasedefaults = List[
150 Callable[[localrepo.localrepository, Phaseroots], Phaseroots]
150 Callable[[localrepo.localrepository, Phaseroots], Phaseroots]
151 ]
151 ]
152
152
153
153
154 _fphasesentry = struct.Struct(b'>i20s')
154 _fphasesentry = struct.Struct(b'>i20s')
155
155
156 # record phase index
156 # record phase index
157 public: int = 0
157 public: int = 0
158 draft: int = 1
158 draft: int = 1
159 secret: int = 2
159 secret: int = 2
160 archived = 32 # non-continuous for compatibility
160 archived = 32 # non-continuous for compatibility
161 internal = 96 # non-continuous for compatibility
161 internal = 96 # non-continuous for compatibility
162 allphases = (public, draft, secret, archived, internal)
162 allphases = (public, draft, secret, archived, internal)
163 trackedphases = (draft, secret, archived, internal)
163 trackedphases = (draft, secret, archived, internal)
164 not_public_phases = trackedphases
164 not_public_phases = trackedphases
165 # record phase names
165 # record phase names
166 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
166 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
167 phasenames = dict(enumerate(cmdphasenames))
167 phasenames = dict(enumerate(cmdphasenames))
168 phasenames[archived] = b'archived'
168 phasenames[archived] = b'archived'
169 phasenames[internal] = b'internal'
169 phasenames[internal] = b'internal'
170 # map phase name to phase number
170 # map phase name to phase number
171 phasenumber = {name: phase for phase, name in phasenames.items()}
171 phasenumber = {name: phase for phase, name in phasenames.items()}
172 # like phasenumber, but also include maps for the numeric and binary
172 # like phasenumber, but also include maps for the numeric and binary
173 # phase number to the phase number
173 # phase number to the phase number
174 phasenumber2 = phasenumber.copy()
174 phasenumber2 = phasenumber.copy()
175 phasenumber2.update({phase: phase for phase in phasenames})
175 phasenumber2.update({phase: phase for phase in phasenames})
176 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
176 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
177 # record phase property
177 # record phase property
178 mutablephases = (draft, secret, archived, internal)
178 mutablephases = (draft, secret, archived, internal)
179 relevant_mutable_phases = (draft, secret) # could be obsolete or unstable
179 relevant_mutable_phases = (draft, secret) # could be obsolete or unstable
180 remotehiddenphases = (secret, archived, internal)
180 remotehiddenphases = (secret, archived, internal)
181 localhiddenphases = (internal, archived)
181 localhiddenphases = (internal, archived)
182
182
183 all_internal_phases = tuple(p for p in allphases if p & internal)
183 all_internal_phases = tuple(p for p in allphases if p & internal)
184 # We do not want any internal content to exit the repository, ever.
184 # We do not want any internal content to exit the repository, ever.
185 no_bundle_phases = all_internal_phases
185 no_bundle_phases = all_internal_phases
186
186
187
187
188 def supportinternal(repo: "localrepo.localrepository") -> bool:
188 def supportinternal(repo: "localrepo.localrepository") -> bool:
189 """True if the internal phase can be used on a repository"""
189 """True if the internal phase can be used on a repository"""
190 return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
190 return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
191
191
192
192
193 def supportarchived(repo: "localrepo.localrepository") -> bool:
193 def supportarchived(repo: "localrepo.localrepository") -> bool:
194 """True if the archived phase can be used on a repository"""
194 """True if the archived phase can be used on a repository"""
195 return requirements.ARCHIVED_PHASE_REQUIREMENT in repo.requirements
195 return requirements.ARCHIVED_PHASE_REQUIREMENT in repo.requirements
196
196
197
197
198 def _readroots(
198 def _readroots(
199 repo: "localrepo.localrepository",
199 repo: "localrepo.localrepository",
200 phasedefaults: Optional["Phasedefaults"] = None,
200 phasedefaults: Optional["Phasedefaults"] = None,
201 ) -> Tuple[Phaseroots, bool]:
201 ) -> Tuple[Phaseroots, bool]:
202 """Read phase roots from disk
202 """Read phase roots from disk
203
203
204 phasedefaults is a list of fn(repo, roots) callable, which are
204 phasedefaults is a list of fn(repo, roots) callable, which are
205 executed if the phase roots file does not exist. When phases are
205 executed if the phase roots file does not exist. When phases are
206 being initialized on an existing repository, this could be used to
206 being initialized on an existing repository, this could be used to
207 set selected changesets phase to something else than public.
207 set selected changesets phase to something else than public.
208
208
209 Return (roots, dirty) where dirty is true if roots differ from
209 Return (roots, dirty) where dirty is true if roots differ from
210 what is being stored.
210 what is being stored.
211 """
211 """
212 repo = repo.unfiltered()
212 repo = repo.unfiltered()
213 dirty = False
213 dirty = False
214 roots = {i: set() for i in allphases}
214 roots = {i: set() for i in allphases}
215 to_rev = repo.changelog.index.get_rev
215 to_rev = repo.changelog.index.get_rev
216 unknown_msg = b'removing unknown node %s from %i-phase boundary\n'
216 unknown_msg = b'removing unknown node %s from %i-phase boundary\n'
217 try:
217 try:
218 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
218 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
219 try:
219 try:
220 for line in f:
220 for line in f:
221 str_phase, hex_node = line.split()
221 str_phase, hex_node = line.split()
222 phase = int(str_phase)
222 phase = int(str_phase)
223 node = bin(hex_node)
223 node = bin(hex_node)
224 rev = to_rev(node)
224 rev = to_rev(node)
225 if rev is None:
225 if rev is None:
226 repo.ui.debug(unknown_msg % (short(hex_node), phase))
226 repo.ui.debug(unknown_msg % (short(hex_node), phase))
227 dirty = True
227 dirty = True
228 else:
228 else:
229 roots[phase].add(rev)
229 roots[phase].add(rev)
230 finally:
230 finally:
231 f.close()
231 f.close()
232 except FileNotFoundError:
232 except FileNotFoundError:
233 if phasedefaults:
233 if phasedefaults:
234 for f in phasedefaults:
234 for f in phasedefaults:
235 roots = f(repo, roots)
235 roots = f(repo, roots)
236 dirty = True
236 dirty = True
237 return roots, dirty
237 return roots, dirty
238
238
239
239
240 def binaryencode(phasemapping: Dict[int, List[bytes]]) -> bytes:
240 def binaryencode(phasemapping: Dict[int, List[bytes]]) -> bytes:
241 """encode a 'phase -> nodes' mapping into a binary stream
241 """encode a 'phase -> nodes' mapping into a binary stream
242
242
243 The revision lists are encoded as (phase, root) pairs.
243 The revision lists are encoded as (phase, root) pairs.
244 """
244 """
245 binarydata = []
245 binarydata = []
246 for phase, nodes in phasemapping.items():
246 for phase, nodes in phasemapping.items():
247 for head in nodes:
247 for head in nodes:
248 binarydata.append(_fphasesentry.pack(phase, head))
248 binarydata.append(_fphasesentry.pack(phase, head))
249 return b''.join(binarydata)
249 return b''.join(binarydata)
250
250
251
251
252 def binarydecode(stream) -> Dict[int, List[bytes]]:
252 def binarydecode(stream) -> Dict[int, List[bytes]]:
253 """decode a binary stream into a 'phase -> nodes' mapping
253 """decode a binary stream into a 'phase -> nodes' mapping
254
254
255 The (phase, root) pairs are turned back into a dictionary with
255 The (phase, root) pairs are turned back into a dictionary with
256 the phase as index and the aggregated roots of that phase as value."""
256 the phase as index and the aggregated roots of that phase as value."""
257 headsbyphase = {i: [] for i in allphases}
257 headsbyphase = {i: [] for i in allphases}
258 entrysize = _fphasesentry.size
258 entrysize = _fphasesentry.size
259 while True:
259 while True:
260 entry = stream.read(entrysize)
260 entry = stream.read(entrysize)
261 if len(entry) < entrysize:
261 if len(entry) < entrysize:
262 if entry:
262 if entry:
263 raise error.Abort(_(b'bad phase-heads stream'))
263 raise error.Abort(_(b'bad phase-heads stream'))
264 break
264 break
265 phase, node = _fphasesentry.unpack(entry)
265 phase, node = _fphasesentry.unpack(entry)
266 headsbyphase[phase].append(node)
266 headsbyphase[phase].append(node)
267 return headsbyphase
267 return headsbyphase
268
268
269
269
270 def _sortedrange_insert(data, idx, rev, t):
270 def _sortedrange_insert(data, idx, rev, t):
271 merge_before = False
271 merge_before = False
272 if idx:
272 if idx:
273 r1, t1 = data[idx - 1]
273 r1, t1 = data[idx - 1]
274 merge_before = r1[-1] + 1 == rev and t1 == t
274 merge_before = r1[-1] + 1 == rev and t1 == t
275 merge_after = False
275 merge_after = False
276 if idx < len(data):
276 if idx < len(data):
277 r2, t2 = data[idx]
277 r2, t2 = data[idx]
278 merge_after = r2[0] == rev + 1 and t2 == t
278 merge_after = r2[0] == rev + 1 and t2 == t
279
279
280 if merge_before and merge_after:
280 if merge_before and merge_after:
281 data[idx - 1] = (range(r1[0], r2[-1] + 1), t)
281 data[idx - 1] = (range(r1[0], r2[-1] + 1), t)
282 data.pop(idx)
282 data.pop(idx)
283 elif merge_before:
283 elif merge_before:
284 data[idx - 1] = (range(r1[0], rev + 1), t)
284 data[idx - 1] = (range(r1[0], rev + 1), t)
285 elif merge_after:
285 elif merge_after:
286 data[idx] = (range(rev, r2[-1] + 1), t)
286 data[idx] = (range(rev, r2[-1] + 1), t)
287 else:
287 else:
288 data.insert(idx, (range(rev, rev + 1), t))
288 data.insert(idx, (range(rev, rev + 1), t))
289
289
290
290
291 def _sortedrange_split(data, idx, rev, t):
291 def _sortedrange_split(data, idx, rev, t):
292 r1, t1 = data[idx]
292 r1, t1 = data[idx]
293 if t == t1:
293 if t == t1:
294 return
294 return
295 t = (t1[0], t[1])
295 t = (t1[0], t[1])
296 if len(r1) == 1:
296 if len(r1) == 1:
297 data.pop(idx)
297 data.pop(idx)
298 _sortedrange_insert(data, idx, rev, t)
298 _sortedrange_insert(data, idx, rev, t)
299 elif r1[0] == rev:
299 elif r1[0] == rev:
300 data[idx] = (range(rev + 1, r1[-1] + 1), t1)
300 data[idx] = (range(rev + 1, r1[-1] + 1), t1)
301 _sortedrange_insert(data, idx, rev, t)
301 _sortedrange_insert(data, idx, rev, t)
302 elif r1[-1] == rev:
302 elif r1[-1] == rev:
303 data[idx] = (range(r1[0], rev), t1)
303 data[idx] = (range(r1[0], rev), t1)
304 _sortedrange_insert(data, idx + 1, rev, t)
304 _sortedrange_insert(data, idx + 1, rev, t)
305 else:
305 else:
306 data[idx : idx + 1] = [
306 data[idx : idx + 1] = [
307 (range(r1[0], rev), t1),
307 (range(r1[0], rev), t1),
308 (range(rev, rev + 1), t),
308 (range(rev, rev + 1), t),
309 (range(rev + 1, r1[-1] + 1), t1),
309 (range(rev + 1, r1[-1] + 1), t1),
310 ]
310 ]
311
311
312
312
313 def _trackphasechange(data, rev, old, new):
313 def _trackphasechange(data, rev, old, new):
314 """add a phase move to the <data> list of ranges
314 """add a phase move to the <data> list of ranges
315
315
316 If data is None, nothing happens.
316 If data is None, nothing happens.
317 """
317 """
318 if data is None:
318 if data is None:
319 return
319 return
320
320
321 # If data is empty, create a one-revision range and done
321 # If data is empty, create a one-revision range and done
322 if not data:
322 if not data:
323 data.insert(0, (range(rev, rev + 1), (old, new)))
323 data.insert(0, (range(rev, rev + 1), (old, new)))
324 return
324 return
325
325
326 low = 0
326 low = 0
327 high = len(data)
327 high = len(data)
328 t = (old, new)
328 t = (old, new)
329 while low < high:
329 while low < high:
330 mid = (low + high) // 2
330 mid = (low + high) // 2
331 revs = data[mid][0]
331 revs = data[mid][0]
332 revs_low = revs[0]
332 revs_low = revs[0]
333 revs_high = revs[-1]
333 revs_high = revs[-1]
334
334
335 if rev >= revs_low and rev <= revs_high:
335 if rev >= revs_low and rev <= revs_high:
336 _sortedrange_split(data, mid, rev, t)
336 _sortedrange_split(data, mid, rev, t)
337 return
337 return
338
338
339 if revs_low == rev + 1:
339 if revs_low == rev + 1:
340 if mid and data[mid - 1][0][-1] == rev:
340 if mid and data[mid - 1][0][-1] == rev:
341 _sortedrange_split(data, mid - 1, rev, t)
341 _sortedrange_split(data, mid - 1, rev, t)
342 else:
342 else:
343 _sortedrange_insert(data, mid, rev, t)
343 _sortedrange_insert(data, mid, rev, t)
344 return
344 return
345
345
346 if revs_high == rev - 1:
346 if revs_high == rev - 1:
347 if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
347 if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
348 _sortedrange_split(data, mid + 1, rev, t)
348 _sortedrange_split(data, mid + 1, rev, t)
349 else:
349 else:
350 _sortedrange_insert(data, mid + 1, rev, t)
350 _sortedrange_insert(data, mid + 1, rev, t)
351 return
351 return
352
352
353 if revs_low > rev:
353 if revs_low > rev:
354 high = mid
354 high = mid
355 else:
355 else:
356 low = mid + 1
356 low = mid + 1
357
357
358 if low == len(data):
358 if low == len(data):
359 data.append((range(rev, rev + 1), t))
359 data.append((range(rev, rev + 1), t))
360 return
360 return
361
361
362 r1, t1 = data[low]
362 r1, t1 = data[low]
363 if r1[0] > rev:
363 if r1[0] > rev:
364 data.insert(low, (range(rev, rev + 1), t))
364 data.insert(low, (range(rev, rev + 1), t))
365 else:
365 else:
366 data.insert(low + 1, (range(rev, rev + 1), t))
366 data.insert(low + 1, (range(rev, rev + 1), t))
367
367
368
368
369 # consider incrementaly updating the phase set the update set is not bigger
369 # consider incrementaly updating the phase set the update set is not bigger
370 # than this size
370 # than this size
371 #
371 #
372 # Be warned, this number is picked arbitrarily, without any benchmark. It
372 # Be warned, this number is picked arbitrarily, without any benchmark. It
373 # should blindly pickup "small update"
373 # should blindly pickup "small update"
374 INCREMENTAL_PHASE_SETS_UPDATE_MAX_UPDATE = 100
374 INCREMENTAL_PHASE_SETS_UPDATE_MAX_UPDATE = 100
375
375
376
376
377 class phasecache:
377 class phasecache:
378 def __init__(
378 def __init__(
379 self,
379 self,
380 repo: "localrepo.localrepository",
380 repo: "localrepo.localrepository",
381 phasedefaults: Optional["Phasedefaults"],
381 phasedefaults: Optional["Phasedefaults"],
382 _load: bool = True,
382 _load: bool = True,
383 ):
383 ):
384 if _load:
384 if _load:
385 # Cheap trick to allow shallow-copy without copy module
385 # Cheap trick to allow shallow-copy without copy module
386 loaded = _readroots(repo, phasedefaults)
386 loaded = _readroots(repo, phasedefaults)
387 self._phaseroots: Phaseroots = loaded[0]
387 self._phaseroots: Phaseroots = loaded[0]
388 self.dirty: bool = loaded[1]
388 self.dirty: bool = loaded[1]
389 self._loadedrevslen = 0
389 self._loadedrevslen = 0
390 self._phasesets: PhaseSets = None
390 self._phasesets: PhaseSets = None
391
391
392 def hasnonpublicphases(self, repo: "localrepo.localrepository") -> bool:
392 def hasnonpublicphases(self, repo: "localrepo.localrepository") -> bool:
393 """detect if there are revisions with non-public phase"""
393 """detect if there are revisions with non-public phase"""
394 # XXX deprecate the unused repo argument
394 # XXX deprecate the unused repo argument
395 return any(
395 return any(
396 revs for phase, revs in self._phaseroots.items() if phase != public
396 revs for phase, revs in self._phaseroots.items() if phase != public
397 )
397 )
398
398
399 def nonpublicphaseroots(
399 def nonpublicphaseroots(
400 self, repo: "localrepo.localrepository"
400 self, repo: "localrepo.localrepository"
401 ) -> Set[int]:
401 ) -> Set[int]:
402 """returns the roots of all non-public phases
402 """returns the roots of all non-public phases
403
403
404 The roots are not minimized, so if the secret revisions are
404 The roots are not minimized, so if the secret revisions are
405 descendants of draft revisions, their roots will still be present.
405 descendants of draft revisions, their roots will still be present.
406 """
406 """
407 repo = repo.unfiltered()
407 repo = repo.unfiltered()
408 self._ensure_phase_sets(repo)
408 self._ensure_phase_sets(repo)
409 return set().union(
409 return set().union(
410 *[
410 *[
411 revs
411 revs
412 for phase, revs in self._phaseroots.items()
412 for phase, revs in self._phaseroots.items()
413 if phase != public
413 if phase != public
414 ]
414 ]
415 )
415 )
416
416
    def getrevset(
        self,
        repo: "localrepo.localrepository",
        phases: Iterable[int],
        subset: Optional[Any] = None,
    ) -> Any:
        # TODO: finish typing this
        """return a smartset for the given phases

        If ``subset`` is given, the result is restricted to it; otherwise
        the whole (possibly filtered) repo is considered.
        """
        self._ensure_phase_sets(repo.unfiltered())
        phases = set(phases)
        publicphase = public in phases

        if publicphase:
            # In this case, phases keeps all the *other* phases.
            # (public revisions are not tracked in _phasesets, so we work
            # with the complement and invert the filtering at the end)
            phases = set(allphases).difference(phases)
            if not phases:
                return smartset.fullreposet(repo)

        # fast path: _phasesets contains the interesting sets,
        # might only need a union and post-filtering.
        revsneedscopy = False
        if len(phases) == 1:
            [p] = phases
            revs = self._phasesets[p]
            revsneedscopy = True  # Don't modify _phasesets
        else:
            # revs has the revisions in all *other* phases.
            revs = set.union(*[self._phasesets[p] for p in phases])

        def _addwdir(wdirsubset, wdirrevs):
            # Inject the working directory pseudo-revision when its phase
            # matches the query.
            if wdirrev in wdirsubset and repo[None].phase() in phases:
                if revsneedscopy:
                    wdirrevs = wdirrevs.copy()
                # The working dir would never be in the cache, but it was in
                # the subset being filtered for its phase (or filtered out,
                # depending on publicphase), so add it to the output to be
                # included (or filtered out).
                wdirrevs.add(wdirrev)
            return wdirrevs

        if not publicphase:
            # `revs` holds exactly the requested revisions; drop the ones
            # hidden by the current repo filter.
            if repo.changelog.filteredrevs:
                revs = revs - repo.changelog.filteredrevs

            if subset is None:
                return smartset.baseset(revs)
            else:
                revs = _addwdir(subset, revs)
                return subset & smartset.baseset(revs)
        else:
            # `revs` holds the *excluded* revisions; return everything else.
            if subset is None:
                subset = smartset.fullreposet(repo)

            revs = _addwdir(subset, revs)

            if not revs:
                return subset
            return subset.filter(lambda r: r not in revs)
475
475
476 def copy(self):
476 def copy(self):
477 # Shallow copy meant to ensure isolation in
477 # Shallow copy meant to ensure isolation in
478 # advance/retractboundary(), nothing more.
478 # advance/retractboundary(), nothing more.
479 ph = self.__class__(None, None, _load=False)
479 ph = self.__class__(None, None, _load=False)
480 ph._phaseroots = self._phaseroots.copy()
480 ph._phaseroots = self._phaseroots.copy()
481 ph.dirty = self.dirty
481 ph.dirty = self.dirty
482 ph._loadedrevslen = self._loadedrevslen
482 ph._loadedrevslen = self._loadedrevslen
483 if self._phasesets is None:
483 if self._phasesets is None:
484 ph._phasesets = None
484 ph._phasesets = None
485 else:
485 else:
486 ph._phasesets = self._phasesets.copy()
486 ph._phasesets = self._phasesets.copy()
487 return ph
487 return ph
488
488
489 def replace(self, phcache):
489 def replace(self, phcache):
490 """replace all values in 'self' with content of phcache"""
490 """replace all values in 'self' with content of phcache"""
491 for a in (
491 for a in (
492 '_phaseroots',
492 '_phaseroots',
493 'dirty',
493 'dirty',
494 '_loadedrevslen',
494 '_loadedrevslen',
495 '_phasesets',
495 '_phasesets',
496 ):
496 ):
497 setattr(self, a, getattr(phcache, a))
497 setattr(self, a, getattr(phcache, a))
498
498
499 def _getphaserevsnative(self, repo):
499 def _getphaserevsnative(self, repo):
500 repo = repo.unfiltered()
500 repo = repo.unfiltered()
501 return repo.changelog.computephases(self._phaseroots)
501 return repo.changelog.computephases(self._phaseroots)
502
502
503 def _computephaserevspure(self, repo):
503 def _computephaserevspure(self, repo):
504 repo = repo.unfiltered()
504 repo = repo.unfiltered()
505 cl = repo.changelog
505 cl = repo.changelog
506 self._phasesets = {phase: set() for phase in allphases}
506 self._phasesets = {phase: set() for phase in allphases}
507 lowerroots = set()
507 lowerroots = set()
508 for phase in reversed(trackedphases):
508 for phase in reversed(trackedphases):
509 roots = self._phaseroots[phase]
509 roots = self._phaseroots[phase]
510 if roots:
510 if roots:
511 ps = set(cl.descendants(roots))
511 ps = set(cl.descendants(roots))
512 for root in roots:
512 for root in roots:
513 ps.add(root)
513 ps.add(root)
514 ps.difference_update(lowerroots)
514 ps.difference_update(lowerroots)
515 lowerroots.update(ps)
515 lowerroots.update(ps)
516 self._phasesets[phase] = ps
516 self._phasesets[phase] = ps
517 self._loadedrevslen = len(cl)
517 self._loadedrevslen = len(cl)
518
518
    def _ensure_phase_sets(self, repo: "localrepo.localrepository") -> None:
        """ensure phase information is loaded in the object

        Either performs a full (native or pure) recomputation, or an
        incremental update restricted to the revisions appended since the
        last load, depending on how many revisions are missing.
        """
        assert repo.filtername is None
        update = -1  # -1: nothing to do, 0: full reload, >0: incremental from that rev
        cl = repo.changelog
        cl_size = len(cl)
        if self._phasesets is None:
            update = 0
        else:
            if cl_size > self._loadedrevslen:
                # check if an incremental update is worth it.
                # note we need a tradeoff here because the whole logic is not
                # stored and implemented in native code and datastructure.
                # Otherwise the incremental update would always be a win.
                missing = cl_size - self._loadedrevslen
                if missing <= INCREMENTAL_PHASE_SETS_UPDATE_MAX_UPDATE:
                    update = self._loadedrevslen
                else:
                    update = 0

        if update == 0:
            try:
                res = self._getphaserevsnative(repo)
                self._loadedrevslen, self._phasesets = res
            except AttributeError:
                # no native support; fall back to the pure implementation
                self._computephaserevspure(repo)
            assert self._loadedrevslen == len(repo.changelog)
        elif update > 0:
            # good candidate for native code
            assert update == self._loadedrevslen
            if self.hasnonpublicphases(repo):
                start = self._loadedrevslen
                get_phase = self.phase
                # phase of each new revision, indexed by (rev - start);
                # 0 (public) unless a root or a parent says otherwise
                rev_phases = [0] * missing
                parents = cl.parentrevs
                sets = {phase: set() for phase in self._phasesets}
                for phase, roots in self._phaseroots.items():
                    # XXX should really store the max somewhere
                    for r in roots:
                        if r >= start:
                            rev_phases[r - start] = phase
                for rev in range(start, cl_size):
                    phase = rev_phases[rev - start]
                    p1, p2 = parents(rev)
                    # a revision's phase is at least the max of its parents'
                    if p1 == nullrev:
                        p1_phase = public
                    elif p1 >= start:
                        p1_phase = rev_phases[p1 - start]
                    else:
                        p1_phase = max(phase, get_phase(repo, p1))
                    if p2 == nullrev:
                        p2_phase = public
                    elif p2 >= start:
                        p2_phase = rev_phases[p2 - start]
                    else:
                        p2_phase = max(phase, get_phase(repo, p2))
                    phase = max(phase, p1_phase, p2_phase)
                    if phase > public:
                        rev_phases[rev - start] = phase
                        sets[phase].add(rev)

                # Be careful to preserve shallow-copied values: do not update
                # phaseroots values, replace them.
                for phase, extra in sets.items():
                    if extra:
                        self._phasesets[phase] = self._phasesets[phase] | extra
            self._loadedrevslen = cl_size
586
586
587 def invalidate(self):
587 def invalidate(self):
588 self._loadedrevslen = 0
588 self._loadedrevslen = 0
589 self._phasesets = None
589 self._phasesets = None
590
590
591 def phase(self, repo: "localrepo.localrepository", rev: int) -> int:
591 def phase(self, repo: "localrepo.localrepository", rev: int) -> int:
592 # We need a repo argument here to be able to build _phasesets
592 # We need a repo argument here to be able to build _phasesets
593 # if necessary. The repository instance is not stored in
593 # if necessary. The repository instance is not stored in
594 # phasecache to avoid reference cycles. The changelog instance
594 # phasecache to avoid reference cycles. The changelog instance
595 # is not stored because it is a filecache() property and can
595 # is not stored because it is a filecache() property and can
596 # be replaced without us being notified.
596 # be replaced without us being notified.
597 if rev == nullrev:
597 if rev == nullrev:
598 return public
598 return public
599 if rev < nullrev:
599 if rev < nullrev:
600 raise ValueError(_(b'cannot lookup negative revision'))
600 raise ValueError(_(b'cannot lookup negative revision'))
601 # double check self._loadedrevslen to avoid an extra method call as
601 # double check self._loadedrevslen to avoid an extra method call as
602 # python is slow for that.
602 # python is slow for that.
603 if rev >= self._loadedrevslen:
603 if rev >= self._loadedrevslen:
604 self._ensure_phase_sets(repo.unfiltered())
604 self._ensure_phase_sets(repo.unfiltered())
605 for phase in trackedphases:
605 for phase in trackedphases:
606 if rev in self._phasesets[phase]:
606 if rev in self._phasesets[phase]:
607 return phase
607 return phase
608 return public
608 return public
609
609
    def write(self, repo):
        """persist the phase roots to ``.hg/store/phaseroots`` if dirty

        No-op when nothing changed.  The atomictemp file guarantees readers
        never observe a partially written file; close() commits it.
        """
        if not self.dirty:
            return
        f = repo.svfs(b'phaseroots', b'w', atomictemp=True, checkambig=True)
        try:
            self._write(repo.unfiltered(), f)
        finally:
            f.close()
618
618
619 def _write(self, repo, fp):
619 def _write(self, repo, fp):
620 assert repo.filtername is None
620 assert repo.filtername is None
621 to_node = repo.changelog.node
621 to_node = repo.changelog.node
622 for phase, roots in self._phaseroots.items():
622 for phase, roots in self._phaseroots.items():
623 for r in sorted(roots):
623 for r in sorted(roots):
624 h = to_node(r)
624 h = to_node(r)
625 fp.write(b'%i %s\n' % (phase, hex(h)))
625 fp.write(b'%i %s\n' % (phase, hex(h)))
626 self.dirty = False
626 self.dirty = False
627
627
    def _updateroots(self, repo, phase, newroots, tr, invalidate=True):
        """replace the roots of ``phase`` and schedule a write in ``tr``

        ``invalidate=False`` lets callers that already maintain _phasesets
        incrementally skip the full cache invalidation.
        """
        self._phaseroots[phase] = newroots
        self.dirty = True
        if invalidate:
            self.invalidate()

        assert repo.filtername is None
        # hold the repo through a weakref so the file generator closure does
        # not create a strong reference cycle — TODO confirm tr outlives repo
        wrepo = weakref.ref(repo)

        def tr_write(fp):
            repo = wrepo()
            assert repo is not None
            self._write(repo, fp)

        tr.addfilegenerator(b'phase', (b'phaseroots',), tr_write)
        tr.hookargs[b'phases_moved'] = b'1'
644
644
645 def registernew(self, repo, tr, targetphase, revs):
645 def registernew(self, repo, tr, targetphase, revs):
646 repo = repo.unfiltered()
646 repo = repo.unfiltered()
647 self._retractboundary(repo, tr, targetphase, [], revs=revs)
647 self._retractboundary(repo, tr, targetphase, [], revs=revs)
648 if tr is not None and b'phases' in tr.changes:
648 if tr is not None and b'phases' in tr.changes:
649 phasetracking = tr.changes[b'phases']
649 phasetracking = tr.changes[b'phases']
650 phase = self.phase
650 phase = self.phase
651 for rev in sorted(revs):
651 for rev in sorted(revs):
652 revphase = phase(repo, rev)
652 revphase = phase(repo, rev)
653 _trackphasechange(phasetracking, rev, None, revphase)
653 _trackphasechange(phasetracking, rev, None, revphase)
654 repo.invalidatevolatilesets()
654 repo.invalidatevolatilesets()
655
655
    def advanceboundary(
        self, repo, tr, targetphase, nodes=None, revs=None, dryrun=None
    ):
        """Set all 'nodes' to phase 'targetphase'

        Nodes with a phase lower than 'targetphase' are not affected.

        If dryrun is True, no actions will be performed

        Returns a set of revs whose phase is changed or should be changed
        """
        if targetphase == public and not self.hasnonpublicphases(repo):
            return set()
        repo = repo.unfiltered()
        cl = repo.changelog
        torev = cl.index.rev
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.
        new_revs = set()
        if revs is not None:
            new_revs.update(revs)
        if nodes is not None:
            new_revs.update(torev(node) for node in nodes)
        if not new_revs:  # bail out early to avoid the loadphaserevs call
            return (
                set()
            )  # note: why do people call advanceboundary with nothing?

        if tr is None:
            phasetracking = None
        else:
            phasetracking = tr.changes.get(b'phases')

        # phases strictly above the target that actually have roots; only
        # those can contain revisions affected by this advance
        affectable_phases = sorted(
            p for p in allphases if p > targetphase and self._phaseroots[p]
        )
        # filter revision already in the right phases
        candidates = new_revs
        new_revs = set()
        self._ensure_phase_sets(repo)
        for phase in affectable_phases:
            found = candidates & self._phasesets[phase]
            new_revs |= found
            candidates -= found
            if not candidates:
                break
        if not new_revs:
            return set()

        # search for affected high phase changesets and roots
        seen = set(new_revs)
        push = heapq.heappush
        pop = heapq.heappop
        parents = cl.parentrevs
        get_phase = self.phase
        changed = {}  # maps revision to be changed -> its old phase
        # set of root deleted by this path
        delroots = set()
        new_roots = {p: set() for p in affectable_phases}
        new_target_roots = set()
        # revision to walk down (max-heap emulated by negating revs)
        revs = [-r for r in new_revs]
        heapq.heapify(revs)
        while revs:
            current = -pop(revs)
            current_phase = get_phase(repo, current)
            changed[current] = current_phase
            p1, p2 = parents(current)
            if p1 == nullrev:
                p1_phase = public
            else:
                p1_phase = get_phase(repo, p1)
            if p2 == nullrev:
                p2_phase = public
            else:
                p2_phase = get_phase(repo, p2)
            # do we have a root ?
            if current_phase != p1_phase and current_phase != p2_phase:
                # do not record phase, because we could have "duplicated"
                # roots, where one root is shadowed by the very same roots of
                # a higher phase
                delroots.add(current)
            # schedule a walk down if needed
            if p1_phase > targetphase and p1 not in seen:
                seen.add(p1)
                push(revs, -p1)
            if p2_phase > targetphase and p2 not in seen:
                seen.add(p2)
                push(revs, -p2)
            if p1_phase < targetphase and p2_phase < targetphase:
                new_target_roots.add(current)

        # the last iteration was done with the smallest value
        min_current = current
        # do we have unwalked children that might be new roots
        if (min_current + len(changed)) < len(cl):
            for r in range(min_current, len(cl)):
                if r in changed:
                    continue
                phase = get_phase(repo, r)
                if phase <= targetphase:
                    continue
                p1, p2 = parents(r)
                if not (p1 in changed or p2 in changed):
                    continue  # not affected
                if p1 != nullrev and p1 not in changed:
                    p1_phase = get_phase(repo, p1)
                    if p1_phase == phase:
                        continue  # not a root
                if p2 != nullrev and p2 not in changed:
                    p2_phase = get_phase(repo, p2)
                    if p2_phase == phase:
                        continue  # not a root
                new_roots[phase].add(r)

        # apply the changes
        if not dryrun:
            for r, p in changed.items():
                _trackphasechange(phasetracking, r, p, targetphase)
            if targetphase > public:
                self._phasesets[targetphase].update(changed)
            for phase in affectable_phases:
                roots = self._phaseroots[phase]
                removed = roots & delroots
                if removed or new_roots[phase]:
                    self._phasesets[phase].difference_update(changed)
                    # Be careful to preserve shallow-copied values: do not
                    # update phaseroots values, replace them.
                    final_roots = roots - delroots | new_roots[phase]
                    self._updateroots(
                        repo, phase, final_roots, tr, invalidate=False
                    )
            if new_target_roots:
                # Thanks to the previous filtering, we can't replace existing
                # roots
                new_target_roots |= self._phaseroots[targetphase]
                self._updateroots(
                    repo, targetphase, new_target_roots, tr, invalidate=False
                )
            repo.invalidatevolatilesets()
        return changed
797
797
798 def retractboundary(self, repo, tr, targetphase, nodes):
798 def retractboundary(self, repo, tr, targetphase, nodes):
799 if tr is None:
799 if tr is None:
800 phasetracking = None
800 phasetracking = None
801 else:
801 else:
802 phasetracking = tr.changes.get(b'phases')
802 phasetracking = tr.changes.get(b'phases')
803 repo = repo.unfiltered()
803 repo = repo.unfiltered()
804 retracted = self._retractboundary(repo, tr, targetphase, nodes)
804 retracted = self._retractboundary(repo, tr, targetphase, nodes)
805 if retracted and phasetracking is not None:
805 if retracted and phasetracking is not None:
806 for r, old_phase in sorted(retracted.items()):
806 for r, old_phase in sorted(retracted.items()):
807 _trackphasechange(phasetracking, r, old_phase, targetphase)
807 _trackphasechange(phasetracking, r, old_phase, targetphase)
808 repo.invalidatevolatilesets()
808 repo.invalidatevolatilesets()
809
809
    def _retractboundary(self, repo, tr, targetphase, nodes=None, revs=None):
        """retract the ``targetphase`` boundary to cover nodes/revs

        Returns a dict mapping each affected revision to its old phase
        (empty dict when nothing changed).  Does not record phase-tracking
        entries; that is the caller's job.
        """
        if targetphase == public:
            return {}
        if (
            targetphase == internal
            and not supportinternal(repo)
            or targetphase == archived
            and not supportarchived(repo)
        ):
            name = phasenames[targetphase]
            msg = b'this repository does not support the %s phase' % name
            raise error.ProgrammingError(msg)
        assert repo.filtername is None
        cl = repo.changelog
        torev = cl.index.rev
        new_revs = set()
        if revs is not None:
            new_revs.update(revs)
        if nodes is not None:
            new_revs.update(torev(node) for node in nodes)
        if not new_revs:  # bail out early to avoid the loadphaserevs call
            return {}  # note: why do people call retractboundary with nothing ?

        if nullrev in new_revs:
            raise error.Abort(_(b'cannot change null revision phase'))

        # Filter revisions that are already in the right phase
        self._ensure_phase_sets(repo)
        for phase, revs in self._phasesets.items():
            if phase >= targetphase:
                new_revs -= revs
        if not new_revs:  # all revisions already in the right phases
            return {}

        # Compute change in phase roots by walking the graph
        #
        # note: If we had a cheap parent → children mapping we could do
        # something even cheaper/more-bounded
        #
        # The idea would be to walk from item in new_revs stopping at
        # descendant with phases >= target_phase.
        #
        # 1) This detects new_revs that are not new_roots (either already >=
        #    target_phase or reachable through another new_revs
        # 2) This detects replaced current_roots as we reach them
        # 3) This can avoid walking to the tip if we retract over a small
        #    branch.
        #
        # So instead, we do a variation of this, we walk from the smaller new
        # revision to the tip to avoid missing any potential children.
        #
        # The following code would be a good candidate for native code… if only
        # we could know the phase of a changeset efficiently in native code.
        parents = cl.parentrevs
        phase = self.phase
        new_roots = set()  # roots added by this phase
        changed_revs = {}  # revisions affected by this call (rev -> old phase)
        replaced_roots = set()  # older roots replaced by this call
        currentroots = self._phaseroots[targetphase]
        start = min(new_revs)
        end = len(cl)
        # phase each walked revision will have after this call, indexed by
        # (rev - start); None until the revision has been processed
        rev_phases = [None] * (end - start)

        this_phase_set = self._phasesets[targetphase]
        for r in range(start, end):

            # gather information about the current_rev
            r_phase = phase(repo, r)
            p_phase = None  # phase inherited from parents
            p1, p2 = parents(r)
            if p1 >= start:
                p1_phase = rev_phases[p1 - start]
                if p1_phase is not None:
                    p_phase = p1_phase
            if p2 >= start:
                p2_phase = rev_phases[p2 - start]
                if p2_phase is not None:
                    if p_phase is not None:
                        p_phase = max(p_phase, p2_phase)
                    else:
                        p_phase = p2_phase

            # assess the situation
            if r in new_revs and r_phase < targetphase:
                # explicitly retracted revision; a new root unless a parent
                # already carries the target phase
                if p_phase is None or p_phase < targetphase:
                    new_roots.add(r)
                rev_phases[r - start] = targetphase
                changed_revs[r] = r_phase
                this_phase_set.add(r)
            elif p_phase is None:
                rev_phases[r - start] = r_phase
            else:
                # descendant of a walked revision: inherits the max phase
                if p_phase > r_phase:
                    rev_phases[r - start] = p_phase
                else:
                    rev_phases[r - start] = r_phase
                if p_phase == targetphase:
                    if p_phase > r_phase:
                        changed_revs[r] = r_phase
                        this_phase_set.add(r)
                    elif r in currentroots:
                        # pre-existing root now shadowed by a new root above it
                        replaced_roots.add(r)
        sets = self._phasesets
        if targetphase > draft:
            # retracted revisions leave their old (non-public) phase set
            for r, old in changed_revs.items():
                if old > public:
                    sets[old].discard(r)

        if new_roots:
            assert changed_revs

            final_roots = new_roots | currentroots - replaced_roots
            self._updateroots(
                repo,
                targetphase,
                final_roots,
                tr,
                invalidate=False,
            )
            if targetphase > 1:
                # lower phases may no longer need roots that we just retracted
                retracted = set(changed_revs)
                for lower_phase in range(1, targetphase):
                    lower_roots = self._phaseroots.get(lower_phase)
                    if lower_roots is None:
                        continue
                    if lower_roots & retracted:
                        simpler_roots = lower_roots - retracted
                        self._updateroots(
                            repo,
                            lower_phase,
                            simpler_roots,
                            tr,
                            invalidate=False,
                        )
            return changed_revs
        else:
            assert not changed_revs
            assert not replaced_roots
            return {}
949
949
950 def register_strip(
950 def register_strip(
951 self,
951 self,
952 repo,
952 repo,
953 tr,
953 tr,
954 strip_rev: int,
954 strip_rev: int,
955 ):
955 ):
956 """announce a strip to the phase cache
956 """announce a strip to the phase cache
957
957
958 Any roots higher than the stripped revision should be dropped.
958 Any roots higher than the stripped revision should be dropped.
959 """
959 """
960 for targetphase, roots in list(self._phaseroots.items()):
960 for targetphase, roots in list(self._phaseroots.items()):
961 filtered = {r for r in roots if r >= strip_rev}
961 filtered = {r for r in roots if r >= strip_rev}
962 if filtered:
962 if filtered:
963 self._updateroots(repo, targetphase, roots - filtered, tr)
963 self._updateroots(repo, targetphase, roots - filtered, tr)
964 self.invalidate()
964 self.invalidate()
965
965
966
966
def advanceboundary(repo, tr, targetphase, nodes, revs=None, dryrun=None):
    """Add nodes to a phase changing other nodes phases if necessary.

    This function move boundary *forward* this means that all nodes
    are set in the target phase or kept in a *lower* phase.

    Simplify boundary to contains phase roots only.

    If dryrun is True, no actions will be performed

    Returns a set of revs whose phase is changed or should be changed
    """
    revs = [] if revs is None else revs
    # work on a copy so a dry run never touches the live cache
    cache = repo._phasecache.copy()
    changes = cache.advanceboundary(
        repo, tr, targetphase, nodes, revs=revs, dryrun=dryrun
    )
    if not dryrun:
        # commit the updated cache back onto the repository
        repo._phasecache.replace(cache)
    return changes
988
988
989
989
def retractboundary(repo, tr, targetphase, nodes):
    """Set nodes back to a phase changing other nodes phases if
    necessary.

    This function move boundary *backward* this means that all nodes
    are set in the target phase or kept in a *higher* phase.

    Simplify boundary to contains phase roots only."""
    # mutate a copy, then swap it in so callers never see partial state
    cache = repo._phasecache.copy()
    cache.retractboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(cache)
1001
1001
1002
1002
def registernew(repo, tr, targetphase, revs):
    """register a new revision and its phase

    Code adding revisions to the repository should use this function to
    set new changeset in their target phase (or higher).
    """
    # mutate a copy, then swap it in so callers never see partial state
    cache = repo._phasecache.copy()
    cache.registernew(repo, tr, targetphase, revs)
    repo._phasecache.replace(cache)
1012
1012
1013
1013
def listphases(repo: "localrepo.localrepository") -> Dict[bytes, bytes]:
    """List phases root for serialization over pushkey"""
    # Use ordered dictionary so behavior is deterministic.
    result = util.sortdict()
    draft_value = b'%i' % draft
    to_node = repo.unfiltered().changelog.node
    phasecache = repo._phasecache
    for root in phasecache._phaseroots[draft]:
        # only advertise roots that are still draft (or lower)
        if phasecache.phase(repo, root) <= draft:
            result[hex(to_node(root))] = draft_value

    if repo.publishing():
        # Add an extra data to let remote know we are a publishing
        # repo. Publishing repo can't just pretend they are old repo.
        # When pushing to a publishing repo, the client still need to
        # push phase boundary
        #
        # Push do not only push changeset. It also push phase data.
        # New phase data may apply to common changeset which won't be
        # push (as they are common). Here is a very simple example:
        #
        # 1) repo A push changeset X as draft to repo B
        # 2) repo B make changeset X public
        # 3) repo B push to repo A. X is not pushed but the data that
        #    X as now public should
        #
        # The server can't handle it on it's own as it has no idea of
        # client phase data.
        result[b'publishing'] = b'True'
    return result
1044
1044
1045
1045
def pushphase(
    repo: "localrepo.localrepository",
    nhex: bytes,
    oldphasestr: bytes,
    newphasestr: bytes,
) -> bool:
    """List phases root for serialization over pushkey"""
    repo = repo.unfiltered()
    with repo.lock():
        currentphase = repo[nhex].phase()
        # let's avoid negative index surprise
        newphase = abs(int(newphasestr))
        oldphase = abs(int(oldphasestr))
        if currentphase == oldphase and newphase < oldphase:
            # expected starting point and an actual advance: apply it
            with repo.transaction(b'pushkey-phase') as tr:
                advanceboundary(repo, tr, newphase, [bin(nhex)])
            return True
        # raced, but got correct result
        return currentphase == newphase
1067
1067
1068
1068
def subsetphaseheads(repo, subset):
    """Finds the phase heads for a subset of a history

    Returns a list indexed by phase number where each item is a list of phase
    head nodes.
    """
    to_node = repo.changelog.node
    headsbyphase = {}
    for phase in allphases:
        # heads of the subset that are exactly in this phase
        revset = b"heads(%%ln & _phase(%d))" % phase
        headsbyphase[phase] = [
            to_node(r) for r in repo.revs(revset, subset)
        ]
    return headsbyphase
1082
1082
1083
1083
def updatephases(repo, trgetter, headsbyphase):
    """Updates the repo with the given phase heads"""
    # Now advance phase boundaries of all phases
    #
    # run the update (and fetch transaction) only if there are actually things
    # to update. This avoid creating empty transaction during no-op operation.

    for phase in allphases:
        revset = b'%ln - _phase(%s)'
        heads = [
            ctx.node()
            for ctx in repo.set(revset, headsbyphase[phase], phase)
        ]
        if heads:
            advanceboundary(repo, trgetter(), phase, heads)
1096
1096
1097
1097
def analyzeremotephases(repo, subset, roots):
    """Compute phases heads and root in a subset of node from root dict

    * subset is heads of the subset
    * roots is {<nodeid> => phase} mapping. key and value are string.

    Accept unknown element input
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draft_roots = []
    to_rev = repo.changelog.index.get_rev
    to_node = repo.changelog.node
    for nhex, phase in roots.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == public:
            if node != repo.nullid:
                msg = _(b'ignoring inconsistent public root from remote: %s\n')
                repo.ui.warn(msg % nhex)
        elif phase == draft:
            rev = to_rev(node)
            if rev is not None:  # to filter unknown nodes
                draft_roots.append(rev)
        else:
            msg = _(b'ignoring unexpected root from remote: %i %s\n')
            repo.ui.warn(msg % (phase, nhex))
    # compute heads
    #
    # The head computation now operates on revision numbers (new_heads), so
    # convert the node subset to revs first, then convert the results back to
    # nodes for the callers.
    subset_revs = [to_rev(n) for n in subset]
    public_heads = new_heads(repo, subset_revs, draft_roots)
    draft_nodes = [to_node(r) for r in draft_roots]
    public_nodes = [to_node(r) for r in public_heads]
    return public_nodes, draft_nodes
1131
1133
1132
1134
class remotephasessummary:
    """summarize phase information on the remote side

    :publishing: True is the remote is publishing
    :publicheads: list of remote public phase heads (nodes)
    :draftheads: list of remote draft phase heads (nodes)
    :draftroots: list of remote draft phase root (nodes)
    """

    def __init__(self, repo, remotesubset, remoteroots):
        unfi = repo.unfiltered()
        self._allremoteroots = remoteroots

        # absent key means the remote is non-publishing
        self.publishing = remoteroots.get(b'publishing', False)

        self.publicheads, self.draftroots = analyzeremotephases(
            repo, remotesubset, remoteroots
        )
        # Get the list of all "heads" revs draft on remote
        dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
        self.draftheads = [c.node() for c in dheads]
1153
1155
1154
1156
def new_heads(
    repo,
    heads: Collection[int],
    roots: Collection[int],
) -> Collection[int]:
    """compute new head of a subset minus another

    * `heads`: define the first subset
    * `roots`: define the second we subtract from the first

    Both arguments and the return value are revision numbers (the old
    ``newheads`` variant of this function worked on nodes).
    """
    # prevent an import cycle
    # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
    from . import dagop

    if not roots:
        return heads
    if not heads or heads == [nullrev]:
        return []
    # The logic operated on revisions, convert arguments early for convenience
    # PERF-XXX: maybe heads could directly comes as a set without impacting
    # other user of that value
    new_heads = set(heads)
    new_heads.discard(nullrev)
    # compute the area we need to remove
    affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
    # heads in the area are no longer heads
    new_heads.difference_update(affected_zone)
    # revisions in the area have children outside of it,
    # They might be new heads
    candidates = repo.revs(
        b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
    )
    candidates -= affected_zone
    if new_heads or candidates:
        # remove candidate that are ancestors of other heads
        new_heads.update(candidates)
        prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
        pruned = dagop.reachableroots(repo, candidates, prunestart)
        new_heads.difference_update(pruned)

    # PERF-XXX: do we actually need a sorted list here? Could we simply return
    # a set?
    return sorted(new_heads)
1192
1199
1193
1200
def newcommitphase(ui: "uimod.ui") -> int:
    """helper to get the target phase of new commit

    Handle all possible values for the phases.new-commit options.

    """
    name = ui.config(b'phases', b'new-commit')
    try:
        return phasenumber2[name]
    except KeyError:
        # unknown phase name in the config: surface it as a config error
        msg = _(b"phases.new-commit: not a valid phase name ('%s')")
        raise error.ConfigError(msg % name)
1213 )
1207
1214
1208
1215
1209 def hassecret(repo: "localrepo.localrepository") -> bool:
1216 def hassecret(repo: "localrepo.localrepository") -> bool:
1210 """utility function that check if a repo have any secret changeset."""
1217 """utility function that check if a repo have any secret changeset."""
1211 return bool(repo._phasecache._phaseroots[secret])
1218 return bool(repo._phasecache._phaseroots[secret])
1212
1219
1213
1220
1214 def preparehookargs(
1221 def preparehookargs(
1215 node: bytes,
1222 node: bytes,
1216 old: Optional[int],
1223 old: Optional[int],
1217 new: Optional[int],
1224 new: Optional[int],
1218 ) -> Dict[bytes, bytes]:
1225 ) -> Dict[bytes, bytes]:
1219 if old is None:
1226 if old is None:
1220 old = b''
1227 old = b''
1221 else:
1228 else:
1222 old = phasenames[old]
1229 old = phasenames[old]
1223 return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
1230 return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
General Comments 0
You need to be logged in to leave comments. Login now