##// END OF EJS Templates
phases: check secret presence the right way during discovery...
marmoute -
r52295:2e10ddbb default
parent child Browse files
Show More
@@ -1,619 +1,619 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import functools
9 import functools
10
10
11 from .i18n import _
11 from .i18n import _
12 from .node import (
12 from .node import (
13 hex,
13 hex,
14 short,
14 short,
15 )
15 )
16
16
17 from . import (
17 from . import (
18 bookmarks,
18 bookmarks,
19 branchmap,
19 branchmap,
20 error,
20 error,
21 obsolete,
21 obsolete,
22 phases,
22 phases,
23 pycompat,
23 pycompat,
24 scmutil,
24 scmutil,
25 setdiscovery,
25 setdiscovery,
26 treediscovery,
26 treediscovery,
27 util,
27 util,
28 )
28 )
29
29
30
30
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof" if not None, restrict the discovery to a subset defined by
    these nodes. Changeset outside of this set won't be considered (but may
    still appear in "common").

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    # Servers without the getbundle capability only speak the legacy
    # tree-walking discovery protocol.
    if not remote.capable(b'getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        hasnode = repo.changelog.hasnode  # no nodemap until it is filtered
        # Fast path: every requested head already exists locally, so
        # nothing can be incoming and the heads are their own common set.
        if not [h for h in heads if not hasnode(h)]:
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui,
        repo,
        remote,
        abortwhenunrelated=not force,
        ancestorsof=ancestorsof,
    )
    if heads and not anyinc:
        # server could be lying on the advertised heads
        hasnode = repo.changelog.hasnode
        anyinc = not all(hasnode(n) for n in heads)
    return (list(common), anyinc, heads or list(srvheads))
74
74
75
75
class outgoing:
    """Represents the result of a findcommonoutgoing() call.

    Members:

    ancestorsof is a list of the nodes whose ancestors are included in the
    outgoing operation.

    missing is a list of those ancestors of ancestorsof that are present in
    local but not in remote.

    common is a set containing revs common between the local and the remote
    repository (at least all of those that are ancestors of ancestorsof).

    commonheads is the list of heads of common.

    excluded is the list of missing changeset that shouldn't be sent
    remotely.

    Some members are computed on demand from the heads, unless provided upfront
    by discovery."""

    def __init__(
        self, repo, commonheads=None, ancestorsof=None, missingroots=None
    ):
        # The outgoing set can be described either by the heads of the
        # common subset (commonheads) or by the roots of the missing
        # subset (missingroots), but not both.
        # at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if ancestorsof is None:
            # default to everything: the outgoing set is bounded by all
            # local heads
            ancestorsof = cl.heads()
        if missingroots:
            # Derive commonheads from missingroots: the common heads are
            # the parents of missing changesets that are not themselves
            # missing.
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
            included = set(csets)
            discbases = []
            for n in csets:
                discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
            ancestorsof = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            # no boundary supplied: nothing is common, everything below
            # ancestorsof is outgoing
            commonheads = [repo.nullid]
        self.commonheads = commonheads
        self.ancestorsof = ancestorsof
        # changelog used by the lazy common/missing computation
        self._revlog = cl
        # lazily-computed caches for the `common` and `missing` properties
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        """Compute and cache the common and missing node sets from
        commonheads/ancestorsof."""
        sets = self._revlog.findcommonmissing(
            self.commonheads, self.ancestorsof
        )
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        """Set of revs common to local and remote (computed on demand)."""
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        """List of nodes present locally but not remotely (computed on
        demand)."""
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
143
143
144
144
def findcommonoutgoing(
    repo, other, onlyheads=None, force=False, commoninc=None, portable=False
):
    """Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and ancestorsof,
    to make bundles created from the instance more portable."""
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(
            repo, other, force=force, ancestorsof=onlyheads
        )
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    # Secret changesets or obsolescence markers may force some changesets
    # out of the outgoing set; only do the extra filtering work when one
    # of them is present.
    mayexclude = phases.hassecret(repo) or repo.obsstore
    if not mayexclude:
        og.ancestorsof = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.ancestorsof = repo.filtered(b"served").heads()
        og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            # secret or extinct changesets are withheld from the push
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # nothing was excluded, the requested heads still stand
            ancestorsof = onlyheads
        else:  # update missing heads
            ancestorsof = phases.newheads(repo, onlyheads, excluded)
        og.ancestorsof = ancestorsof
    if portable:
        # recompute common and ancestorsof as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = {cl.rev(n) for n in og._missing}
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]

    return og
207
207
208
208
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Create set of branches involved in the push.
    branches = set()
    for n in outgoing.missing:
        ctx = repo[n]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    with remote.commandexecutor() as e:
        remotemap = e.callcommand(b'branchmap', {}).result()

    knownnode = cl.hasnode  # do not use nodemap until it is filtered
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in remotemap.items():
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        known = []
        unsynced = []
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache(
        repo,
        (
            (branch, heads[1])
            for branch, heads in headssum.items()
            if heads[0] is not None
        ),
    )
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.items():
        headssum[branch][1][:] = newheads
    for branch, items in headssum.items():
        for l in items:
            if l is not None:
                l.sort()
        # append the (still empty) discardedheads slot
        headssum[branch] = items + ([],)

    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = {torev(h) for h in outgoing.ancestorsof}
        futureheads |= {torev(h) for h in outgoing.commonheads}
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        # NOTE: dict.items() instead of the redundant pycompat.iteritems()
        # wrapper, matching the rest of this module (Python 3 only).
        for branch, heads in sorted(headssum.items()):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (
                remoteheads,
                sorted(result[0]),
                unsyncedheads,
                sorted(result[1]),
            )
    return headssum
293
293
294
294
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Old servers expose no per-branch information, so everything is
    # summarized on the single topological branch, keyed by None.
    # (code based on update)
    hasnode = repo.changelog.hasnode  # no nodemap until it is filtered
    oldheads = sorted(filter(hasnode, remoteheads))
    # Every node in outgoing.missing is a child of an element of oldheads,
    # of another missing node, or of nullrev, so the post-push heads are
    # simply the heads of the union of both sets.
    revs = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(ctx.node() for ctx in revs)
    # A placeholder unsynced head triggers the "unsynced changes" warning.
    unsynced = [None] if inc else []
    return {None: (oldheads, newheads, unsynced, [])}
316
316
317
317
def _nowarnheads(pushop):
    """Return the set of head nodes exempt from new-head warnings.

    Heads carried by bookmarks being pushed are not warned about.
    """
    repo = pushop.repo.unfiltered()
    local_marks = repo._bookmarks

    with pushop.remote.commandexecutor() as executor:
        remote_marks = executor.callcommand(
            b'listkeys',
            {
                b'namespace': b'bookmarks',
            },
        ).result()

    # internal config: bookmarks.pushing
    pushed_names = [
        local_marks.expandname(name)
        for name in pushop.ui.configlist(b'bookmarks', b'pushing')
    ]

    exempt = set()
    for name in local_marks:
        remote_node = remote_marks.get(name)
        if remote_node and remote_node in repo:
            local_ctx = repo[local_marks[name]]
            remote_ctx = repo[remote_node]
            # only exempt the head when the bookmark move is a valid
            # fast-forward/update on the remote side
            if bookmarks.validdest(repo, remote_ctx, local_ctx):
                exempt.add(local_ctx.node())
        elif name in pushed_names and name not in remote_marks:
            # brand-new bookmark being pushed explicitly
            exempt.add(local_marks[name])

    return exempt
351
351
352
352
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise StateError error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [repo.nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable(b'branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    # branches with no known remote heads are new to the remote
    newbranches = [
        branch for branch, heads in headssum.items() if heads[0] is None
    ]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = b', '.join(sorted(newbranches))
        # Calculate how many of the new branches are closed branches
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = closedbranches & set(newbranches)
        if closedbranches:
            errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
                branchnames,
                len(closedbranches),
            )
        else:
            errmsg = _(b"push creates new remote branches: %s") % branchnames
        hint = _(b"use 'hg push --new-branch' to create new remote branches")
        raise error.StateError(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    # NOTE: dict.items() instead of the redundant pycompat.iteritems()
    # wrapper, matching the rest of this module (Python 3 only).
    for branch, heads in sorted(headssum.items()):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally\n")
                )
            elif branch is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally: %s\n")
                    % heads
                )
            else:
                repo.ui.status(
                    _(
                        b"remote has heads on branch '%s' that are "
                        b"not known locally: %s\n"
                    )
                    % (branch, heads)
                )
        if remoteheads is None:
            # new branch: any multiplicity of heads is an error
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (
                        _(b"push creates new branch '%s' with multiple heads")
                        % branch
                    )
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in (b'default', None):
                    errormsg = _(
                        b"push creates new remote head %s on branch '%s'"
                    ) % (
                        short(dhs[0]),
                        branch,
                    )
                elif repo[dhs[0]].bookmarks():
                    errormsg = _(
                        b"push creates new remote head %s "
                        b"with bookmark '%s'"
                    ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _(b"push creates new remote head %s") % short(
                        dhs[0]
                    )
                if unsyncedheads:
                    hint = _(
                        b"pull and merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
                else:
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
            if branch is None:
                repo.ui.note(_(b"new remote heads:\n"))
            else:
                repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note(b" %s\n" % short(h))
    if errormsg:
        # hint is always assigned alongside errormsg above
        raise error.StateError(errormsg, hint=hint)
498
498
499
499
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    ``pushop`` is the push operation; only ``pushop.repo`` is read here.
    ``futurecommon`` is a set of local revision numbers that will be common
    with the remote once the push completes (membership is tested against
    ``torev(node)``).
    ``candidate_newhs`` is the iterable of candidate new head nodes; nodes
    unknown to the local repo are passed through untouched.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    # use the unfiltered repo so obsolete (normally hidden) revisions are
    # reachable for the analysis below
    unfi = repo.unfiltered()
    torev = unfi.changelog.index.get_rev
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = lambda r: getphase(unfi, r) == public
    ispushed = lambda n: torev(n) in futurecommon
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set()  # final set of new heads
    discarded = set()  # new head of fully replaced branch

    localcandidate = set()  # candidate heads known locally
    unknownheads = set()  # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                # we hold markers about a head we do not have: surprising but
                # not actionable locally, so only emit a debug note
                msg = (
                    b'checkheads: remote head unknown locally has'
                    b' local marker: %s\n'
                )
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    # NOTE(review): this only triggers for exactly one *local* candidate; a
    # single head cannot be the replacement of another candidate head, so no
    # head can be discarded and all candidates pass through unchanged.
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    obsrevs = obsolete.getrevs(unfi, b'obsolete')
    futurenonobsolete = frozenset(futurecommon) - obsrevs

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        r = torev(nh)
        current_branch = unfi[nh].branch()
        # run this check early to skip the evaluation of the whole branch
        # (a public or non-obsolete head stays a head no matter what the
        # rest of its branch looks like)
        if ispublic(r) or r not in obsrevs:
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs(
            b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
        )

        # restrict to changesets on the same named branch as the head; other
        # branches crossing this range do not affect this head's fate
        branchnodes = []
        for r in branchrevs:
            c = unfi[r]
            if c.branch() == current_branch:
                branchnodes.append(c.node())

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (
            any(ispublic(r) for r in branchrevs)
            or any(torev(n) in futurenonobsolete for n in branchnodes)
            or any(not hasoutmarker(n) for n in branchnodes)
        ):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads.  However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
589
589
590
590
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    successors_of = obsstore.successors
    visited = {node}
    pending = [node]
    while pending:
        current = pending.pop()
        if ispushed(current):
            return True
        for marker in successors_of.get(current, ()):
            # marker fields = ('prec', 'succs', 'flag', 'meta', 'date',
            # 'parents')
            targets = marker[1]  # successors
            if not targets:
                # prune marker: follow the recorded parents instead
                targets = marker[5] or ()
            for nxt in targets:
                if nxt in visited:
                    continue
                visited.add(nxt)
                pending.append(nxt)
    return False
General Comments 0
You need to be logged in to leave comments. Login now