##// END OF EJS Templates
discovery: remove deprecated API...
Raphaël Gomès -
r49361:61fe7e17 default
parent child Browse files
Show More
@@ -1,628 +1,617
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 short,
15 short,
16 )
16 )
17
17
18 from . import (
18 from . import (
19 bookmarks,
19 bookmarks,
20 branchmap,
20 branchmap,
21 error,
21 error,
22 phases,
22 phases,
23 pycompat,
23 pycompat,
24 scmutil,
24 scmutil,
25 setdiscovery,
25 setdiscovery,
26 treediscovery,
26 treediscovery,
27 util,
27 util,
28 )
28 )
29
29
30
30
31 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
31 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
32 """Return a tuple (common, anyincoming, heads) used to identify the common
32 """Return a tuple (common, anyincoming, heads) used to identify the common
33 subset of nodes between repo and remote.
33 subset of nodes between repo and remote.
34
34
35 "common" is a list of (at least) the heads of the common subset.
35 "common" is a list of (at least) the heads of the common subset.
36 "anyincoming" is testable as a boolean indicating if any nodes are missing
36 "anyincoming" is testable as a boolean indicating if any nodes are missing
37 locally. If remote does not support getbundle, this actually is a list of
37 locally. If remote does not support getbundle, this actually is a list of
38 roots of the nodes that would be incoming, to be supplied to
38 roots of the nodes that would be incoming, to be supplied to
39 changegroupsubset. No code except for pull should be relying on this fact
39 changegroupsubset. No code except for pull should be relying on this fact
40 any longer.
40 any longer.
41 "heads" is either the supplied heads, or else the remote's heads.
41 "heads" is either the supplied heads, or else the remote's heads.
42 "ancestorsof" if not None, restrict the discovery to a subset defined by
42 "ancestorsof" if not None, restrict the discovery to a subset defined by
43 these nodes. Changesets outside of this set won't be considered (but may
43 these nodes. Changesets outside of this set won't be considered (but may
44 still appear in "common").
44 still appear in "common").
45
45
46 If you pass heads and they are all known locally, the response lists just
46 If you pass heads and they are all known locally, the response lists just
47 these heads in "common" and in "heads".
47 these heads in "common" and in "heads".
48
48
49 Please use findcommonoutgoing to compute the set of outgoing nodes to give
49 Please use findcommonoutgoing to compute the set of outgoing nodes to give
50 extensions a good hook into outgoing.
50 extensions a good hook into outgoing.
51 """
51 """
52
52
53 if not remote.capable(b'getbundle'):
53 if not remote.capable(b'getbundle'):
54 return treediscovery.findcommonincoming(repo, remote, heads, force)
54 return treediscovery.findcommonincoming(repo, remote, heads, force)
55
55
56 if heads:
56 if heads:
57 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
57 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 if all(knownnode(h) for h in heads):
58 if all(knownnode(h) for h in heads):
59 return (heads, False, heads)
59 return (heads, False, heads)
60
60
61 res = setdiscovery.findcommonheads(
61 res = setdiscovery.findcommonheads(
62 repo.ui,
62 repo.ui,
63 repo,
63 repo,
64 remote,
64 remote,
65 abortwhenunrelated=not force,
65 abortwhenunrelated=not force,
66 ancestorsof=ancestorsof,
66 ancestorsof=ancestorsof,
67 )
67 )
68 common, anyinc, srvheads = res
68 common, anyinc, srvheads = res
69 if heads and not anyinc:
69 if heads and not anyinc:
70 # server could be lying on the advertised heads
70 # server could be lying on the advertised heads
71 has_node = repo.changelog.hasnode
71 has_node = repo.changelog.hasnode
72 anyinc = any(not has_node(n) for n in heads)
72 anyinc = any(not has_node(n) for n in heads)
73 return (list(common), anyinc, heads or list(srvheads))
73 return (list(common), anyinc, heads or list(srvheads))
74
74
75
75
76 class outgoing(object):
76 class outgoing(object):
77 """Represents the result of a findcommonoutgoing() call.
77 """Represents the result of a findcommonoutgoing() call.
78
78
79 Members:
79 Members:
80
80
81 ancestorsof is a list of the nodes whose ancestors are included in the
81 ancestorsof is a list of the nodes whose ancestors are included in the
82 outgoing operation.
82 outgoing operation.
83
83
84 missing is a list of those ancestors of ancestorsof that are present in
84 missing is a list of those ancestors of ancestorsof that are present in
85 local but not in remote.
85 local but not in remote.
86
86
87 common is a set containing revs common between the local and the remote
87 common is a set containing revs common between the local and the remote
88 repository (at least all of those that are ancestors of ancestorsof).
88 repository (at least all of those that are ancestors of ancestorsof).
89
89
90 commonheads is the list of heads of common.
90 commonheads is the list of heads of common.
91
91
92 excluded is the list of missing changesets that shouldn't be sent
92 excluded is the list of missing changesets that shouldn't be sent
93 remotely.
93 remotely.
94
94
95 Some members are computed on demand from the heads, unless provided upfront
95 Some members are computed on demand from the heads, unless provided upfront
96 by discovery."""
96 by discovery."""
97
97
98 def __init__(
98 def __init__(
99 self, repo, commonheads=None, ancestorsof=None, missingroots=None
99 self, repo, commonheads=None, ancestorsof=None, missingroots=None
100 ):
100 ):
101 # at least one of them must not be set
101 # at least one of them must not be set
102 assert None in (commonheads, missingroots)
102 assert None in (commonheads, missingroots)
103 cl = repo.changelog
103 cl = repo.changelog
104 if ancestorsof is None:
104 if ancestorsof is None:
105 ancestorsof = cl.heads()
105 ancestorsof = cl.heads()
106 if missingroots:
106 if missingroots:
107 discbases = []
107 discbases = []
108 for n in missingroots:
108 for n in missingroots:
109 discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
109 discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
110 # TODO remove call to nodesbetween.
110 # TODO remove call to nodesbetween.
111 # TODO populate attributes on outgoing instance instead of setting
111 # TODO populate attributes on outgoing instance instead of setting
112 # discbases.
112 # discbases.
113 csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
113 csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
114 included = set(csets)
114 included = set(csets)
115 ancestorsof = heads
115 ancestorsof = heads
116 commonheads = [n for n in discbases if n not in included]
116 commonheads = [n for n in discbases if n not in included]
117 elif not commonheads:
117 elif not commonheads:
118 commonheads = [repo.nullid]
118 commonheads = [repo.nullid]
119 self.commonheads = commonheads
119 self.commonheads = commonheads
120 self.ancestorsof = ancestorsof
120 self.ancestorsof = ancestorsof
121 self._revlog = cl
121 self._revlog = cl
122 self._common = None
122 self._common = None
123 self._missing = None
123 self._missing = None
124 self.excluded = []
124 self.excluded = []
125
125
126 def _computecommonmissing(self):
126 def _computecommonmissing(self):
127 sets = self._revlog.findcommonmissing(
127 sets = self._revlog.findcommonmissing(
128 self.commonheads, self.ancestorsof
128 self.commonheads, self.ancestorsof
129 )
129 )
130 self._common, self._missing = sets
130 self._common, self._missing = sets
131
131
132 @util.propertycache
132 @util.propertycache
133 def common(self):
133 def common(self):
134 if self._common is None:
134 if self._common is None:
135 self._computecommonmissing()
135 self._computecommonmissing()
136 return self._common
136 return self._common
137
137
138 @util.propertycache
138 @util.propertycache
139 def missing(self):
139 def missing(self):
140 if self._missing is None:
140 if self._missing is None:
141 self._computecommonmissing()
141 self._computecommonmissing()
142 return self._missing
142 return self._missing
143
143
144 @property
145 def missingheads(self):
146 util.nouideprecwarn(
147 b'outgoing.missingheads never contained what the name suggests and '
148 b'was renamed to outgoing.ancestorsof. check your code for '
149 b'correctness.',
150 b'5.5',
151 stacklevel=2,
152 )
153 return self.ancestorsof
154
155
144
156 def findcommonoutgoing(
145 def findcommonoutgoing(
157 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
146 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
158 ):
147 ):
159 """Return an outgoing instance to identify the nodes present in repo but
148 """Return an outgoing instance to identify the nodes present in repo but
160 not in other.
149 not in other.
161
150
162 If onlyheads is given, only nodes ancestral to nodes in onlyheads
151 If onlyheads is given, only nodes ancestral to nodes in onlyheads
163 (inclusive) are included. If you already know the local repo's heads,
152 (inclusive) are included. If you already know the local repo's heads,
164 passing them in onlyheads is faster than letting them be recomputed here.
153 passing them in onlyheads is faster than letting them be recomputed here.
165
154
166 If commoninc is given, it must be the result of a prior call to
155 If commoninc is given, it must be the result of a prior call to
167 findcommonincoming(repo, other, force) to avoid recomputing it here.
156 findcommonincoming(repo, other, force) to avoid recomputing it here.
168
157
169 If portable is given, compute more conservative common and ancestorsof,
158 If portable is given, compute more conservative common and ancestorsof,
170 to make bundles created from the instance more portable."""
159 to make bundles created from the instance more portable."""
171 # declare an empty outgoing object to be filled later
160 # declare an empty outgoing object to be filled later
172 og = outgoing(repo, None, None)
161 og = outgoing(repo, None, None)
173
162
174 # get common set if not provided
163 # get common set if not provided
175 if commoninc is None:
164 if commoninc is None:
176 commoninc = findcommonincoming(
165 commoninc = findcommonincoming(
177 repo, other, force=force, ancestorsof=onlyheads
166 repo, other, force=force, ancestorsof=onlyheads
178 )
167 )
179 og.commonheads, _any, _hds = commoninc
168 og.commonheads, _any, _hds = commoninc
180
169
181 # compute outgoing
170 # compute outgoing
182 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
171 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
183 if not mayexclude:
172 if not mayexclude:
184 og.ancestorsof = onlyheads or repo.heads()
173 og.ancestorsof = onlyheads or repo.heads()
185 elif onlyheads is None:
174 elif onlyheads is None:
186 # use visible heads as it should be cached
175 # use visible heads as it should be cached
187 og.ancestorsof = repo.filtered(b"served").heads()
176 og.ancestorsof = repo.filtered(b"served").heads()
188 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
177 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
189 else:
178 else:
190 # compute common, missing and exclude secret stuff
179 # compute common, missing and exclude secret stuff
191 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
180 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
192 og._common, allmissing = sets
181 og._common, allmissing = sets
193 og._missing = missing = []
182 og._missing = missing = []
194 og.excluded = excluded = []
183 og.excluded = excluded = []
195 for node in allmissing:
184 for node in allmissing:
196 ctx = repo[node]
185 ctx = repo[node]
197 if ctx.phase() >= phases.secret or ctx.extinct():
186 if ctx.phase() >= phases.secret or ctx.extinct():
198 excluded.append(node)
187 excluded.append(node)
199 else:
188 else:
200 missing.append(node)
189 missing.append(node)
201 if len(missing) == len(allmissing):
190 if len(missing) == len(allmissing):
202 ancestorsof = onlyheads
191 ancestorsof = onlyheads
203 else: # update missing heads
192 else: # update missing heads
204 ancestorsof = phases.newheads(repo, onlyheads, excluded)
193 ancestorsof = phases.newheads(repo, onlyheads, excluded)
205 og.ancestorsof = ancestorsof
194 og.ancestorsof = ancestorsof
206 if portable:
195 if portable:
207 # recompute common and ancestorsof as if -r<rev> had been given for
196 # recompute common and ancestorsof as if -r<rev> had been given for
208 # each head of missing, and --base <rev> for each head of the proper
197 # each head of missing, and --base <rev> for each head of the proper
209 # ancestors of missing
198 # ancestors of missing
210 og._computecommonmissing()
199 og._computecommonmissing()
211 cl = repo.changelog
200 cl = repo.changelog
212 missingrevs = {cl.rev(n) for n in og._missing}
201 missingrevs = {cl.rev(n) for n in og._missing}
213 og._common = set(cl.ancestors(missingrevs)) - missingrevs
202 og._common = set(cl.ancestors(missingrevs)) - missingrevs
214 commonheads = set(og.commonheads)
203 commonheads = set(og.commonheads)
215 og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
204 og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
216
205
217 return og
206 return og
218
207
219
208
220 def _headssummary(pushop):
209 def _headssummary(pushop):
221 """compute a summary of branch and heads status before and after push
210 """compute a summary of branch and heads status before and after push
222
211
223 return {'branch': ([remoteheads], [newheads],
212 return {'branch': ([remoteheads], [newheads],
224 [unsyncedheads], [discardedheads])} mapping
213 [unsyncedheads], [discardedheads])} mapping
225
214
226 - branch: the branch name,
215 - branch: the branch name,
227 - remoteheads: the list of remote heads known locally
216 - remoteheads: the list of remote heads known locally
228 None if the branch is new,
217 None if the branch is new,
229 - newheads: the new remote heads (known locally) with outgoing pushed,
218 - newheads: the new remote heads (known locally) with outgoing pushed,
230 - unsyncedheads: the list of remote heads unknown locally,
219 - unsyncedheads: the list of remote heads unknown locally,
231 - discardedheads: the list of heads made obsolete by the push.
220 - discardedheads: the list of heads made obsolete by the push.
232 """
221 """
233 repo = pushop.repo.unfiltered()
222 repo = pushop.repo.unfiltered()
234 remote = pushop.remote
223 remote = pushop.remote
235 outgoing = pushop.outgoing
224 outgoing = pushop.outgoing
236 cl = repo.changelog
225 cl = repo.changelog
237 headssum = {}
226 headssum = {}
238 missingctx = set()
227 missingctx = set()
239 # A. Create set of branches involved in the push.
228 # A. Create set of branches involved in the push.
240 branches = set()
229 branches = set()
241 for n in outgoing.missing:
230 for n in outgoing.missing:
242 ctx = repo[n]
231 ctx = repo[n]
243 missingctx.add(ctx)
232 missingctx.add(ctx)
244 branches.add(ctx.branch())
233 branches.add(ctx.branch())
245
234
246 with remote.commandexecutor() as e:
235 with remote.commandexecutor() as e:
247 remotemap = e.callcommand(b'branchmap', {}).result()
236 remotemap = e.callcommand(b'branchmap', {}).result()
248
237
249 knownnode = cl.hasnode # do not use nodemap until it is filtered
238 knownnode = cl.hasnode # do not use nodemap until it is filtered
250 # A. register remote heads of branches which are in outgoing set
239 # A. register remote heads of branches which are in outgoing set
251 for branch, heads in pycompat.iteritems(remotemap):
240 for branch, heads in pycompat.iteritems(remotemap):
252 # don't add head info about branches which we don't have locally
241 # don't add head info about branches which we don't have locally
253 if branch not in branches:
242 if branch not in branches:
254 continue
243 continue
255 known = []
244 known = []
256 unsynced = []
245 unsynced = []
257 for h in heads:
246 for h in heads:
258 if knownnode(h):
247 if knownnode(h):
259 known.append(h)
248 known.append(h)
260 else:
249 else:
261 unsynced.append(h)
250 unsynced.append(h)
262 headssum[branch] = (known, list(known), unsynced)
251 headssum[branch] = (known, list(known), unsynced)
263
252
264 # B. add new branch data
253 # B. add new branch data
265 for branch in branches:
254 for branch in branches:
266 if branch not in headssum:
255 if branch not in headssum:
267 headssum[branch] = (None, [], [])
256 headssum[branch] = (None, [], [])
268
257
269 # C. Update newmap with outgoing changes.
258 # C. Update newmap with outgoing changes.
270 # This will possibly add new heads and remove existing ones.
259 # This will possibly add new heads and remove existing ones.
271 newmap = branchmap.remotebranchcache(
260 newmap = branchmap.remotebranchcache(
272 repo,
261 repo,
273 (
262 (
274 (branch, heads[1])
263 (branch, heads[1])
275 for branch, heads in pycompat.iteritems(headssum)
264 for branch, heads in pycompat.iteritems(headssum)
276 if heads[0] is not None
265 if heads[0] is not None
277 ),
266 ),
278 )
267 )
279 newmap.update(repo, (ctx.rev() for ctx in missingctx))
268 newmap.update(repo, (ctx.rev() for ctx in missingctx))
280 for branch, newheads in pycompat.iteritems(newmap):
269 for branch, newheads in pycompat.iteritems(newmap):
281 headssum[branch][1][:] = newheads
270 headssum[branch][1][:] = newheads
282 for branch, items in pycompat.iteritems(headssum):
271 for branch, items in pycompat.iteritems(headssum):
283 for l in items:
272 for l in items:
284 if l is not None:
273 if l is not None:
285 l.sort()
274 l.sort()
286 headssum[branch] = items + ([],)
275 headssum[branch] = items + ([],)
287
276
288 # If there is no obsstore, no post processing is needed.
277 # If there is no obsstore, no post processing is needed.
289 if repo.obsstore:
278 if repo.obsstore:
290 torev = repo.changelog.rev
279 torev = repo.changelog.rev
291 futureheads = {torev(h) for h in outgoing.ancestorsof}
280 futureheads = {torev(h) for h in outgoing.ancestorsof}
292 futureheads |= {torev(h) for h in outgoing.commonheads}
281 futureheads |= {torev(h) for h in outgoing.commonheads}
293 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
282 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
294 for branch, heads in sorted(pycompat.iteritems(headssum)):
283 for branch, heads in sorted(pycompat.iteritems(headssum)):
295 remoteheads, newheads, unsyncedheads, placeholder = heads
284 remoteheads, newheads, unsyncedheads, placeholder = heads
296 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
285 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
297 headssum[branch] = (
286 headssum[branch] = (
298 remoteheads,
287 remoteheads,
299 sorted(result[0]),
288 sorted(result[0]),
300 unsyncedheads,
289 unsyncedheads,
301 sorted(result[1]),
290 sorted(result[1]),
302 )
291 )
303 return headssum
292 return headssum
304
293
305
294
306 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
295 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
307 """Compute branchmapsummary for repo without branchmap support"""
296 """Compute branchmapsummary for repo without branchmap support"""
308
297
309 # 1-4b. old servers: Check for new topological heads.
298 # 1-4b. old servers: Check for new topological heads.
310 # Construct {old,new}map with branch = None (topological branch).
299 # Construct {old,new}map with branch = None (topological branch).
311 # (code based on update)
300 # (code based on update)
312 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
301 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
313 oldheads = sorted(h for h in remoteheads if knownnode(h))
302 oldheads = sorted(h for h in remoteheads if knownnode(h))
314 # all nodes in outgoing.missing are children of either:
303 # all nodes in outgoing.missing are children of either:
315 # - an element of oldheads
304 # - an element of oldheads
316 # - another element of outgoing.missing
305 # - another element of outgoing.missing
317 # - nullrev
306 # - nullrev
318 # This explains why the new heads are very simple to compute.
307 # This explains why the new heads are very simple to compute.
319 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
308 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
320 newheads = sorted(c.node() for c in r)
309 newheads = sorted(c.node() for c in r)
321 # set some unsynced head to issue the "unsynced changes" warning
310 # set some unsynced head to issue the "unsynced changes" warning
322 if inc:
311 if inc:
323 unsynced = [None]
312 unsynced = [None]
324 else:
313 else:
325 unsynced = []
314 unsynced = []
326 return {None: (oldheads, newheads, unsynced, [])}
315 return {None: (oldheads, newheads, unsynced, [])}
327
316
328
317
329 def _nowarnheads(pushop):
318 def _nowarnheads(pushop):
330 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
319 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
331 repo = pushop.repo.unfiltered()
320 repo = pushop.repo.unfiltered()
332 remote = pushop.remote
321 remote = pushop.remote
333 localbookmarks = repo._bookmarks
322 localbookmarks = repo._bookmarks
334
323
335 with remote.commandexecutor() as e:
324 with remote.commandexecutor() as e:
336 remotebookmarks = e.callcommand(
325 remotebookmarks = e.callcommand(
337 b'listkeys',
326 b'listkeys',
338 {
327 {
339 b'namespace': b'bookmarks',
328 b'namespace': b'bookmarks',
340 },
329 },
341 ).result()
330 ).result()
342
331
343 bookmarkedheads = set()
332 bookmarkedheads = set()
344
333
345 # internal config: bookmarks.pushing
334 # internal config: bookmarks.pushing
346 newbookmarks = [
335 newbookmarks = [
347 localbookmarks.expandname(b)
336 localbookmarks.expandname(b)
348 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
337 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
349 ]
338 ]
350
339
351 for bm in localbookmarks:
340 for bm in localbookmarks:
352 rnode = remotebookmarks.get(bm)
341 rnode = remotebookmarks.get(bm)
353 if rnode and rnode in repo:
342 if rnode and rnode in repo:
354 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
343 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
355 if bookmarks.validdest(repo, rctx, lctx):
344 if bookmarks.validdest(repo, rctx, lctx):
356 bookmarkedheads.add(lctx.node())
345 bookmarkedheads.add(lctx.node())
357 else:
346 else:
358 if bm in newbookmarks and bm not in remotebookmarks:
347 if bm in newbookmarks and bm not in remotebookmarks:
359 bookmarkedheads.add(localbookmarks[bm])
348 bookmarkedheads.add(localbookmarks[bm])
360
349
361 return bookmarkedheads
350 return bookmarkedheads
362
351
363
352
364 def checkheads(pushop):
353 def checkheads(pushop):
365 """Check that a push won't add any outgoing head
354 """Check that a push won't add any outgoing head
366
355
367 raise StateError error and display ui message as needed.
356 raise StateError error and display ui message as needed.
368 """
357 """
369
358
370 repo = pushop.repo.unfiltered()
359 repo = pushop.repo.unfiltered()
371 remote = pushop.remote
360 remote = pushop.remote
372 outgoing = pushop.outgoing
361 outgoing = pushop.outgoing
373 remoteheads = pushop.remoteheads
362 remoteheads = pushop.remoteheads
374 newbranch = pushop.newbranch
363 newbranch = pushop.newbranch
375 inc = bool(pushop.incoming)
364 inc = bool(pushop.incoming)
376
365
377 # Check for each named branch if we're creating new remote heads.
366 # Check for each named branch if we're creating new remote heads.
378 # To be a remote head after push, node must be either:
367 # To be a remote head after push, node must be either:
379 # - unknown locally
368 # - unknown locally
380 # - a local outgoing head descended from update
369 # - a local outgoing head descended from update
381 # - a remote head that's known locally and not
370 # - a remote head that's known locally and not
382 # ancestral to an outgoing head
371 # ancestral to an outgoing head
383 if remoteheads == [repo.nullid]:
372 if remoteheads == [repo.nullid]:
384 # remote is empty, nothing to check.
373 # remote is empty, nothing to check.
385 return
374 return
386
375
387 if remote.capable(b'branchmap'):
376 if remote.capable(b'branchmap'):
388 headssum = _headssummary(pushop)
377 headssum = _headssummary(pushop)
389 else:
378 else:
390 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
379 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
391 pushop.pushbranchmap = headssum
380 pushop.pushbranchmap = headssum
392 newbranches = [
381 newbranches = [
393 branch
382 branch
394 for branch, heads in pycompat.iteritems(headssum)
383 for branch, heads in pycompat.iteritems(headssum)
395 if heads[0] is None
384 if heads[0] is None
396 ]
385 ]
397 # 1. Check for new branches on the remote.
386 # 1. Check for new branches on the remote.
398 if newbranches and not newbranch: # new branch requires --new-branch
387 if newbranches and not newbranch: # new branch requires --new-branch
399 branchnames = b', '.join(sorted(newbranches))
388 branchnames = b', '.join(sorted(newbranches))
400 # Calculate how many of the new branches are closed branches
389 # Calculate how many of the new branches are closed branches
401 closedbranches = set()
390 closedbranches = set()
402 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
391 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
403 if isclosed:
392 if isclosed:
404 closedbranches.add(tag)
393 closedbranches.add(tag)
405 closedbranches = closedbranches & set(newbranches)
394 closedbranches = closedbranches & set(newbranches)
406 if closedbranches:
395 if closedbranches:
407 errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
396 errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
408 branchnames,
397 branchnames,
409 len(closedbranches),
398 len(closedbranches),
410 )
399 )
411 else:
400 else:
412 errmsg = _(b"push creates new remote branches: %s") % branchnames
401 errmsg = _(b"push creates new remote branches: %s") % branchnames
413 hint = _(b"use 'hg push --new-branch' to create new remote branches")
402 hint = _(b"use 'hg push --new-branch' to create new remote branches")
414 raise error.StateError(errmsg, hint=hint)
403 raise error.StateError(errmsg, hint=hint)
415
404
416 # 2. Find heads that we need not warn about
405 # 2. Find heads that we need not warn about
417 nowarnheads = _nowarnheads(pushop)
406 nowarnheads = _nowarnheads(pushop)
418
407
419 # 3. Check for new heads.
408 # 3. Check for new heads.
420 # If there are more heads after the push than before, a suitable
409 # If there are more heads after the push than before, a suitable
421 # error message, depending on unsynced status, is displayed.
410 # error message, depending on unsynced status, is displayed.
422 errormsg = None
411 errormsg = None
423 for branch, heads in sorted(pycompat.iteritems(headssum)):
412 for branch, heads in sorted(pycompat.iteritems(headssum)):
424 remoteheads, newheads, unsyncedheads, discardedheads = heads
413 remoteheads, newheads, unsyncedheads, discardedheads = heads
425 # add unsynced data
414 # add unsynced data
426 if remoteheads is None:
415 if remoteheads is None:
427 oldhs = set()
416 oldhs = set()
428 else:
417 else:
429 oldhs = set(remoteheads)
418 oldhs = set(remoteheads)
430 oldhs.update(unsyncedheads)
419 oldhs.update(unsyncedheads)
431 dhs = None # delta heads, the new heads on branch
420 dhs = None # delta heads, the new heads on branch
432 newhs = set(newheads)
421 newhs = set(newheads)
433 newhs.update(unsyncedheads)
422 newhs.update(unsyncedheads)
434 if unsyncedheads:
423 if unsyncedheads:
435 if None in unsyncedheads:
424 if None in unsyncedheads:
436 # old remote, no heads data
425 # old remote, no heads data
437 heads = None
426 heads = None
438 else:
427 else:
439 heads = scmutil.nodesummaries(repo, unsyncedheads)
428 heads = scmutil.nodesummaries(repo, unsyncedheads)
440 if heads is None:
429 if heads is None:
441 repo.ui.status(
430 repo.ui.status(
442 _(b"remote has heads that are not known locally\n")
431 _(b"remote has heads that are not known locally\n")
443 )
432 )
444 elif branch is None:
433 elif branch is None:
445 repo.ui.status(
434 repo.ui.status(
446 _(b"remote has heads that are not known locally: %s\n")
435 _(b"remote has heads that are not known locally: %s\n")
447 % heads
436 % heads
448 )
437 )
449 else:
438 else:
450 repo.ui.status(
439 repo.ui.status(
451 _(
440 _(
452 b"remote has heads on branch '%s' that are "
441 b"remote has heads on branch '%s' that are "
453 b"not known locally: %s\n"
442 b"not known locally: %s\n"
454 )
443 )
455 % (branch, heads)
444 % (branch, heads)
456 )
445 )
457 if remoteheads is None:
446 if remoteheads is None:
458 if len(newhs) > 1:
447 if len(newhs) > 1:
459 dhs = list(newhs)
448 dhs = list(newhs)
460 if errormsg is None:
449 if errormsg is None:
461 errormsg = (
450 errormsg = (
462 _(b"push creates new branch '%s' with multiple heads")
451 _(b"push creates new branch '%s' with multiple heads")
463 % branch
452 % branch
464 )
453 )
465 hint = _(
454 hint = _(
466 b"merge or"
455 b"merge or"
467 b" see 'hg help push' for details about"
456 b" see 'hg help push' for details about"
468 b" pushing new heads"
457 b" pushing new heads"
469 )
458 )
470 elif len(newhs) > len(oldhs):
459 elif len(newhs) > len(oldhs):
471 # remove bookmarked or existing remote heads from the new heads list
460 # remove bookmarked or existing remote heads from the new heads list
472 dhs = sorted(newhs - nowarnheads - oldhs)
461 dhs = sorted(newhs - nowarnheads - oldhs)
473 if dhs:
462 if dhs:
474 if errormsg is None:
463 if errormsg is None:
475 if branch not in (b'default', None):
464 if branch not in (b'default', None):
476 errormsg = _(
465 errormsg = _(
477 b"push creates new remote head %s on branch '%s'"
466 b"push creates new remote head %s on branch '%s'"
478 ) % (
467 ) % (
479 short(dhs[0]),
468 short(dhs[0]),
480 branch,
469 branch,
481 )
470 )
482 elif repo[dhs[0]].bookmarks():
471 elif repo[dhs[0]].bookmarks():
483 errormsg = _(
472 errormsg = _(
484 b"push creates new remote head %s "
473 b"push creates new remote head %s "
485 b"with bookmark '%s'"
474 b"with bookmark '%s'"
486 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
475 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
487 else:
476 else:
488 errormsg = _(b"push creates new remote head %s") % short(
477 errormsg = _(b"push creates new remote head %s") % short(
489 dhs[0]
478 dhs[0]
490 )
479 )
491 if unsyncedheads:
480 if unsyncedheads:
492 hint = _(
481 hint = _(
493 b"pull and merge or"
482 b"pull and merge or"
494 b" see 'hg help push' for details about"
483 b" see 'hg help push' for details about"
495 b" pushing new heads"
484 b" pushing new heads"
496 )
485 )
497 else:
486 else:
498 hint = _(
487 hint = _(
499 b"merge or"
488 b"merge or"
500 b" see 'hg help push' for details about"
489 b" see 'hg help push' for details about"
501 b" pushing new heads"
490 b" pushing new heads"
502 )
491 )
503 if branch is None:
492 if branch is None:
504 repo.ui.note(_(b"new remote heads:\n"))
493 repo.ui.note(_(b"new remote heads:\n"))
505 else:
494 else:
506 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
495 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
507 for h in dhs:
496 for h in dhs:
508 repo.ui.note(b" %s\n" % short(h))
497 repo.ui.note(b" %s\n" % short(h))
509 if errormsg:
498 if errormsg:
510 raise error.StateError(errormsg, hint=hint)
499 raise error.StateError(errormsg, hint=hint)
511
500
512
501
513 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
502 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
514 """post process the list of new heads with obsolescence information
503 """post process the list of new heads with obsolescence information
515
504
516 Exists as a sub-function to contain the complexity and allow extensions to
505 Exists as a sub-function to contain the complexity and allow extensions to
517 experiment with smarter logic.
506 experiment with smarter logic.
518
507
519 Returns (newheads, discarded_heads) tuple
508 Returns (newheads, discarded_heads) tuple
520 """
509 """
521 # known issue
510 # known issue
522 #
511 #
523 # * We "silently" skip processing on all changeset unknown locally
512 # * We "silently" skip processing on all changeset unknown locally
524 #
513 #
525 # * if <nh> is public on the remote, it won't be affected by obsolete
514 # * if <nh> is public on the remote, it won't be affected by obsolete
526 # marker and a new is created
515 # marker and a new is created
527
516
528 # define various utilities and containers
517 # define various utilities and containers
529 repo = pushop.repo
518 repo = pushop.repo
530 unfi = repo.unfiltered()
519 unfi = repo.unfiltered()
531 torev = unfi.changelog.index.get_rev
520 torev = unfi.changelog.index.get_rev
532 public = phases.public
521 public = phases.public
533 getphase = unfi._phasecache.phase
522 getphase = unfi._phasecache.phase
534 ispublic = lambda r: getphase(unfi, r) == public
523 ispublic = lambda r: getphase(unfi, r) == public
535 ispushed = lambda n: torev(n) in futurecommon
524 ispushed = lambda n: torev(n) in futurecommon
536 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
525 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
537 successorsmarkers = unfi.obsstore.successors
526 successorsmarkers = unfi.obsstore.successors
538 newhs = set() # final set of new heads
527 newhs = set() # final set of new heads
539 discarded = set() # new head of fully replaced branch
528 discarded = set() # new head of fully replaced branch
540
529
541 localcandidate = set() # candidate heads known locally
530 localcandidate = set() # candidate heads known locally
542 unknownheads = set() # candidate heads unknown locally
531 unknownheads = set() # candidate heads unknown locally
543 for h in candidate_newhs:
532 for h in candidate_newhs:
544 if h in unfi:
533 if h in unfi:
545 localcandidate.add(h)
534 localcandidate.add(h)
546 else:
535 else:
547 if successorsmarkers.get(h) is not None:
536 if successorsmarkers.get(h) is not None:
548 msg = (
537 msg = (
549 b'checkheads: remote head unknown locally has'
538 b'checkheads: remote head unknown locally has'
550 b' local marker: %s\n'
539 b' local marker: %s\n'
551 )
540 )
552 repo.ui.debug(msg % hex(h))
541 repo.ui.debug(msg % hex(h))
553 unknownheads.add(h)
542 unknownheads.add(h)
554
543
555 # fast path the simple case
544 # fast path the simple case
556 if len(localcandidate) == 1:
545 if len(localcandidate) == 1:
557 return unknownheads | set(candidate_newhs), set()
546 return unknownheads | set(candidate_newhs), set()
558
547
559 # actually process branch replacement
548 # actually process branch replacement
560 while localcandidate:
549 while localcandidate:
561 nh = localcandidate.pop()
550 nh = localcandidate.pop()
562 current_branch = unfi[nh].branch()
551 current_branch = unfi[nh].branch()
563 # run this check early to skip the evaluation of the whole branch
552 # run this check early to skip the evaluation of the whole branch
564 if torev(nh) in futurecommon or ispublic(torev(nh)):
553 if torev(nh) in futurecommon or ispublic(torev(nh)):
565 newhs.add(nh)
554 newhs.add(nh)
566 continue
555 continue
567
556
568 # Get all revs/nodes on the branch exclusive to this head
557 # Get all revs/nodes on the branch exclusive to this head
569 # (already filtered heads are "ignored"))
558 # (already filtered heads are "ignored"))
570 branchrevs = unfi.revs(
559 branchrevs = unfi.revs(
571 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
560 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
572 )
561 )
573
562
574 branchnodes = []
563 branchnodes = []
575 for r in branchrevs:
564 for r in branchrevs:
576 c = unfi[r]
565 c = unfi[r]
577 if c.branch() == current_branch:
566 if c.branch() == current_branch:
578 branchnodes.append(c.node())
567 branchnodes.append(c.node())
579
568
580 # The branch won't be hidden on the remote if
569 # The branch won't be hidden on the remote if
581 # * any part of it is public,
570 # * any part of it is public,
582 # * any part of it is considered part of the result by previous logic,
571 # * any part of it is considered part of the result by previous logic,
583 # * if we have no markers to push to obsolete it.
572 # * if we have no markers to push to obsolete it.
584 if (
573 if (
585 any(ispublic(r) for r in branchrevs)
574 any(ispublic(r) for r in branchrevs)
586 or any(torev(n) in futurecommon for n in branchnodes)
575 or any(torev(n) in futurecommon for n in branchnodes)
587 or any(not hasoutmarker(n) for n in branchnodes)
576 or any(not hasoutmarker(n) for n in branchnodes)
588 ):
577 ):
589 newhs.add(nh)
578 newhs.add(nh)
590 else:
579 else:
591 # note: there is a corner case if there is a merge in the branch.
580 # note: there is a corner case if there is a merge in the branch.
592 # we might end up with -more- heads. However, these heads are not
581 # we might end up with -more- heads. However, these heads are not
593 # "added" by the push, but more by the "removal" on the remote so I
582 # "added" by the push, but more by the "removal" on the remote so I
594 # think is a okay to ignore them,
583 # think is a okay to ignore them,
595 discarded.add(nh)
584 discarded.add(nh)
596 newhs |= unknownheads
585 newhs |= unknownheads
597 return newhs, discarded
586 return newhs, discarded
598
587
599
588
600 def pushingmarkerfor(obsstore, ispushed, node):
589 def pushingmarkerfor(obsstore, ispushed, node):
601 """true if some markers are to be pushed for node
590 """true if some markers are to be pushed for node
602
591
603 We cannot just look in to the pushed obsmarkers from the pushop because
592 We cannot just look in to the pushed obsmarkers from the pushop because
604 discovery might have filtered relevant markers. In addition listing all
593 discovery might have filtered relevant markers. In addition listing all
605 markers relevant to all changesets in the pushed set would be too expensive
594 markers relevant to all changesets in the pushed set would be too expensive
606 (O(len(repo)))
595 (O(len(repo)))
607
596
608 (note: There are cache opportunity in this function. but it would requires
597 (note: There are cache opportunity in this function. but it would requires
609 a two dimensional stack.)
598 a two dimensional stack.)
610 """
599 """
611 successorsmarkers = obsstore.successors
600 successorsmarkers = obsstore.successors
612 stack = [node]
601 stack = [node]
613 seen = set(stack)
602 seen = set(stack)
614 while stack:
603 while stack:
615 current = stack.pop()
604 current = stack.pop()
616 if ispushed(current):
605 if ispushed(current):
617 return True
606 return True
618 markers = successorsmarkers.get(current, ())
607 markers = successorsmarkers.get(current, ())
619 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
608 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
620 for m in markers:
609 for m in markers:
621 nexts = m[1] # successors
610 nexts = m[1] # successors
622 if not nexts: # this is a prune marker
611 if not nexts: # this is a prune marker
623 nexts = m[5] or () # parents
612 nexts = m[5] or () # parents
624 for n in nexts:
613 for n in nexts:
625 if n not in seen:
614 if n not in seen:
626 seen.add(n)
615 seen.add(n)
627 stack.append(n)
616 stack.append(n)
628 return False
617 return False
General Comments 0
You need to be logged in to leave comments. Login now