##// END OF EJS Templates
outgoing: rework the handling of the `missingroots` case to be faster...
marmoute -
r52487:e3a5ec2d default
parent child Browse files
Show More
@@ -1,624 +1,646 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import functools
9 import functools
10
10
11 from .i18n import _
11 from .i18n import _
12 from .node import (
12 from .node import (
13 hex,
13 hex,
14 short,
14 short,
15 )
15 )
16
16
17 from . import (
17 from . import (
18 bookmarks,
18 bookmarks,
19 branchmap,
19 branchmap,
20 error,
20 error,
21 node as nodemod,
21 obsolete,
22 obsolete,
22 phases,
23 phases,
23 pycompat,
24 pycompat,
24 scmutil,
25 scmutil,
25 setdiscovery,
26 setdiscovery,
26 treediscovery,
27 treediscovery,
27 util,
28 util,
28 )
29 )
29
30
30
31
31 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
32 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
32 """Return a tuple (common, anyincoming, heads) used to identify the common
33 """Return a tuple (common, anyincoming, heads) used to identify the common
33 subset of nodes between repo and remote.
34 subset of nodes between repo and remote.
34
35
35 "common" is a list of (at least) the heads of the common subset.
36 "common" is a list of (at least) the heads of the common subset.
36 "anyincoming" is testable as a boolean indicating if any nodes are missing
37 "anyincoming" is testable as a boolean indicating if any nodes are missing
37 locally. If remote does not support getbundle, this actually is a list of
38 locally. If remote does not support getbundle, this actually is a list of
38 roots of the nodes that would be incoming, to be supplied to
39 roots of the nodes that would be incoming, to be supplied to
39 changegroupsubset. No code except for pull should be relying on this fact
40 changegroupsubset. No code except for pull should be relying on this fact
40 any longer.
41 any longer.
41 "heads" is either the supplied heads, or else the remote's heads.
42 "heads" is either the supplied heads, or else the remote's heads.
42 "ancestorsof" if not None, restrict the discovery to a subset defined by
43 "ancestorsof" if not None, restrict the discovery to a subset defined by
43 these nodes. Changeset outside of this set won't be considered (but may
44 these nodes. Changeset outside of this set won't be considered (but may
44 still appear in "common").
45 still appear in "common").
45
46
46 If you pass heads and they are all known locally, the response lists just
47 If you pass heads and they are all known locally, the response lists just
47 these heads in "common" and in "heads".
48 these heads in "common" and in "heads".
48
49
49 Please use findcommonoutgoing to compute the set of outgoing nodes to give
50 Please use findcommonoutgoing to compute the set of outgoing nodes to give
50 extensions a good hook into outgoing.
51 extensions a good hook into outgoing.
51 """
52 """
52
53
53 if not remote.capable(b'getbundle'):
54 if not remote.capable(b'getbundle'):
54 return treediscovery.findcommonincoming(repo, remote, heads, force)
55 return treediscovery.findcommonincoming(repo, remote, heads, force)
55
56
56 if heads:
57 if heads:
57 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 if all(knownnode(h) for h in heads):
59 if all(knownnode(h) for h in heads):
59 return (heads, False, heads)
60 return (heads, False, heads)
60
61
61 res = setdiscovery.findcommonheads(
62 res = setdiscovery.findcommonheads(
62 repo.ui,
63 repo.ui,
63 repo,
64 repo,
64 remote,
65 remote,
65 abortwhenunrelated=not force,
66 abortwhenunrelated=not force,
66 ancestorsof=ancestorsof,
67 ancestorsof=ancestorsof,
67 )
68 )
68 common, anyinc, srvheads = res
69 common, anyinc, srvheads = res
69 if heads and not anyinc:
70 if heads and not anyinc:
70 # server could be lying on the advertised heads
71 # server could be lying on the advertised heads
71 has_node = repo.changelog.hasnode
72 has_node = repo.changelog.hasnode
72 anyinc = any(not has_node(n) for n in heads)
73 anyinc = any(not has_node(n) for n in heads)
73 return (list(common), anyinc, heads or list(srvheads))
74 return (list(common), anyinc, heads or list(srvheads))
74
75
75
76
76 class outgoing:
77 class outgoing:
77 """Represents the result of a findcommonoutgoing() call.
78 """Represents the result of a findcommonoutgoing() call.
78
79
79 Members:
80 Members:
80
81
81 ancestorsof is a list of the nodes whose ancestors are included in the
82 ancestorsof is a list of the nodes whose ancestors are included in the
82 outgoing operation.
83 outgoing operation.
83
84
84 missing is a list of those ancestors of ancestorsof that are present in
85 missing is a list of those ancestors of ancestorsof that are present in
85 local but not in remote.
86 local but not in remote.
86
87
87 common is a set containing revs common between the local and the remote
88 common is a set containing revs common between the local and the remote
88 repository (at least all of those that are ancestors of ancestorsof).
89 repository (at least all of those that are ancestors of ancestorsof).
89
90
90 commonheads is the list of heads of common.
91 commonheads is the list of heads of common.
91
92
92 excluded is the list of missing changeset that shouldn't be sent
93 excluded is the list of missing changeset that shouldn't be sent
93 remotely.
94 remotely.
94
95
95 Some members are computed on demand from the heads, unless provided upfront
96 Some members are computed on demand from the heads, unless provided upfront
96 by discovery."""
97 by discovery."""
97
98
98 def __init__(
99 def __init__(
99 self, repo, commonheads=None, ancestorsof=None, missingroots=None
100 self, repo, commonheads=None, ancestorsof=None, missingroots=None
100 ):
101 ):
101 # at least one of them must not be set
102 # at most one of them must not be set
102 assert None in (commonheads, missingroots)
103 if commonheads is not None and missingroots is not None:
104 m = 'commonheads and missingroots arguments are mutually exclusive'
105 raise error.ProgrammingError(m)
103 cl = repo.changelog
106 cl = repo.changelog
107 missing = None
108 common = None
104 if ancestorsof is None:
109 if ancestorsof is None:
105 ancestorsof = cl.heads()
110 ancestorsof = cl.heads()
106 if missingroots:
111 if missingroots:
107 # TODO remove call to nodesbetween.
112 # TODO remove call to nodesbetween.
108 # TODO populate attributes on outgoing instance instead of setting
113 missing_rev = repo.revs('%ln::%ln', missingroots, ancestorsof)
109 # discbases.
114 unfi = repo.unfiltered()
110 csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
115 ucl = unfi.changelog
111 included = set(csets)
116 to_node = ucl.node
112 discbases = []
117 ancestorsof = [to_node(r) for r in ucl.headrevs(missing_rev)]
113 for n in csets:
118 parent_revs = ucl.parentrevs
114 discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
119 common_legs = set()
115 ancestorsof = heads
120 for r in missing_rev:
116 commonheads = [n for n in discbases if n not in included]
121 p1, p2 = parent_revs(r)
122 if p1 not in missing_rev:
123 common_legs.add(p1)
124 if p2 not in missing_rev:
125 common_legs.add(p2)
126 common_legs.discard(nodemod.nullrev)
127 if not common_legs:
128 commonheads = [repo.nullid]
129 common = set()
130 else:
131 commonheads_revs = unfi.revs(
132 'heads(%ld::%ld)',
133 common_legs,
134 common_legs,
135 )
136 commonheads = [to_node(r) for r in commonheads_revs]
137 common = ucl.ancestors(commonheads_revs, inclusive=True)
138 missing = [to_node(r) for r in missing_rev]
117 elif not commonheads:
139 elif not commonheads:
118 commonheads = [repo.nullid]
140 commonheads = [repo.nullid]
119 self.commonheads = commonheads
141 self.commonheads = commonheads
120 self.ancestorsof = ancestorsof
142 self.ancestorsof = ancestorsof
121 self._revlog = cl
143 self._revlog = cl
122 self._common = None
144 self._common = common
123 self._missing = None
145 self._missing = missing
124 self.excluded = []
146 self.excluded = []
125
147
126 def _computecommonmissing(self):
148 def _computecommonmissing(self):
127 sets = self._revlog.findcommonmissing(
149 sets = self._revlog.findcommonmissing(
128 self.commonheads, self.ancestorsof
150 self.commonheads, self.ancestorsof
129 )
151 )
130 self._common, self._missing = sets
152 self._common, self._missing = sets
131
153
132 @util.propertycache
154 @util.propertycache
133 def common(self):
155 def common(self):
134 if self._common is None:
156 if self._common is None:
135 self._computecommonmissing()
157 self._computecommonmissing()
136 return self._common
158 return self._common
137
159
138 @util.propertycache
160 @util.propertycache
139 def missing(self):
161 def missing(self):
140 if self._missing is None:
162 if self._missing is None:
141 self._computecommonmissing()
163 self._computecommonmissing()
142 return self._missing
164 return self._missing
143
165
144
166
145 def findcommonoutgoing(
167 def findcommonoutgoing(
146 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
168 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
147 ):
169 ):
148 """Return an outgoing instance to identify the nodes present in repo but
170 """Return an outgoing instance to identify the nodes present in repo but
149 not in other.
171 not in other.
150
172
151 If onlyheads is given, only nodes ancestral to nodes in onlyheads
173 If onlyheads is given, only nodes ancestral to nodes in onlyheads
152 (inclusive) are included. If you already know the local repo's heads,
174 (inclusive) are included. If you already know the local repo's heads,
153 passing them in onlyheads is faster than letting them be recomputed here.
175 passing them in onlyheads is faster than letting them be recomputed here.
154
176
155 If commoninc is given, it must be the result of a prior call to
177 If commoninc is given, it must be the result of a prior call to
156 findcommonincoming(repo, other, force) to avoid recomputing it here.
178 findcommonincoming(repo, other, force) to avoid recomputing it here.
157
179
158 If portable is given, compute more conservative common and ancestorsof,
180 If portable is given, compute more conservative common and ancestorsof,
159 to make bundles created from the instance more portable."""
181 to make bundles created from the instance more portable."""
160 # declare an empty outgoing object to be filled later
182 # declare an empty outgoing object to be filled later
161 og = outgoing(repo, None, None)
183 og = outgoing(repo, None, None)
162
184
163 # get common set if not provided
185 # get common set if not provided
164 if commoninc is None:
186 if commoninc is None:
165 commoninc = findcommonincoming(
187 commoninc = findcommonincoming(
166 repo, other, force=force, ancestorsof=onlyheads
188 repo, other, force=force, ancestorsof=onlyheads
167 )
189 )
168 og.commonheads, _any, _hds = commoninc
190 og.commonheads, _any, _hds = commoninc
169
191
170 # compute outgoing
192 # compute outgoing
171 mayexclude = phases.hassecret(repo) or repo.obsstore
193 mayexclude = phases.hassecret(repo) or repo.obsstore
172 if not mayexclude:
194 if not mayexclude:
173 og.ancestorsof = onlyheads or repo.heads()
195 og.ancestorsof = onlyheads or repo.heads()
174 elif onlyheads is None:
196 elif onlyheads is None:
175 # use visible heads as it should be cached
197 # use visible heads as it should be cached
176 og.ancestorsof = repo.filtered(b"served").heads()
198 og.ancestorsof = repo.filtered(b"served").heads()
177 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
199 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
178 else:
200 else:
179 # compute common, missing and exclude secret stuff
201 # compute common, missing and exclude secret stuff
180 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
202 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
181 og._common, allmissing = sets
203 og._common, allmissing = sets
182 og._missing = missing = []
204 og._missing = missing = []
183 og.excluded = excluded = []
205 og.excluded = excluded = []
184 for node in allmissing:
206 for node in allmissing:
185 ctx = repo[node]
207 ctx = repo[node]
186 if ctx.phase() >= phases.secret or ctx.extinct():
208 if ctx.phase() >= phases.secret or ctx.extinct():
187 excluded.append(node)
209 excluded.append(node)
188 else:
210 else:
189 missing.append(node)
211 missing.append(node)
190 if len(missing) == len(allmissing):
212 if len(missing) == len(allmissing):
191 ancestorsof = onlyheads
213 ancestorsof = onlyheads
192 else: # update missing heads
214 else: # update missing heads
193 to_rev = repo.changelog.index.rev
215 to_rev = repo.changelog.index.rev
194 to_node = repo.changelog.node
216 to_node = repo.changelog.node
195 excluded_revs = [to_rev(r) for r in excluded]
217 excluded_revs = [to_rev(r) for r in excluded]
196 onlyheads_revs = [to_rev(r) for r in onlyheads]
218 onlyheads_revs = [to_rev(r) for r in onlyheads]
197 new_heads = phases.new_heads(repo, onlyheads_revs, excluded_revs)
219 new_heads = phases.new_heads(repo, onlyheads_revs, excluded_revs)
198 ancestorsof = [to_node(r) for r in new_heads]
220 ancestorsof = [to_node(r) for r in new_heads]
199 og.ancestorsof = ancestorsof
221 og.ancestorsof = ancestorsof
200 if portable:
222 if portable:
201 # recompute common and ancestorsof as if -r<rev> had been given for
223 # recompute common and ancestorsof as if -r<rev> had been given for
202 # each head of missing, and --base <rev> for each head of the proper
224 # each head of missing, and --base <rev> for each head of the proper
203 # ancestors of missing
225 # ancestors of missing
204 og._computecommonmissing()
226 og._computecommonmissing()
205 cl = repo.changelog
227 cl = repo.changelog
206 missingrevs = {cl.rev(n) for n in og._missing}
228 missingrevs = {cl.rev(n) for n in og._missing}
207 og._common = set(cl.ancestors(missingrevs)) - missingrevs
229 og._common = set(cl.ancestors(missingrevs)) - missingrevs
208 commonheads = set(og.commonheads)
230 commonheads = set(og.commonheads)
209 og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
231 og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
210
232
211 return og
233 return og
212
234
213
235
214 def _headssummary(pushop):
236 def _headssummary(pushop):
215 """compute a summary of branch and heads status before and after push
237 """compute a summary of branch and heads status before and after push
216
238
217 return {'branch': ([remoteheads], [newheads],
239 return {'branch': ([remoteheads], [newheads],
218 [unsyncedheads], [discardedheads])} mapping
240 [unsyncedheads], [discardedheads])} mapping
219
241
220 - branch: the branch name,
242 - branch: the branch name,
221 - remoteheads: the list of remote heads known locally
243 - remoteheads: the list of remote heads known locally
222 None if the branch is new,
244 None if the branch is new,
223 - newheads: the new remote heads (known locally) with outgoing pushed,
245 - newheads: the new remote heads (known locally) with outgoing pushed,
224 - unsyncedheads: the list of remote heads unknown locally,
246 - unsyncedheads: the list of remote heads unknown locally,
225 - discardedheads: the list of heads made obsolete by the push.
247 - discardedheads: the list of heads made obsolete by the push.
226 """
248 """
227 repo = pushop.repo.unfiltered()
249 repo = pushop.repo.unfiltered()
228 remote = pushop.remote
250 remote = pushop.remote
229 outgoing = pushop.outgoing
251 outgoing = pushop.outgoing
230 cl = repo.changelog
252 cl = repo.changelog
231 headssum = {}
253 headssum = {}
232 missingctx = set()
254 missingctx = set()
233 # A. Create set of branches involved in the push.
255 # A. Create set of branches involved in the push.
234 branches = set()
256 branches = set()
235 for n in outgoing.missing:
257 for n in outgoing.missing:
236 ctx = repo[n]
258 ctx = repo[n]
237 missingctx.add(ctx)
259 missingctx.add(ctx)
238 branches.add(ctx.branch())
260 branches.add(ctx.branch())
239
261
240 with remote.commandexecutor() as e:
262 with remote.commandexecutor() as e:
241 remotemap = e.callcommand(b'branchmap', {}).result()
263 remotemap = e.callcommand(b'branchmap', {}).result()
242
264
243 knownnode = cl.hasnode # do not use nodemap until it is filtered
265 knownnode = cl.hasnode # do not use nodemap until it is filtered
244 # A. register remote heads of branches which are in outgoing set
266 # A. register remote heads of branches which are in outgoing set
245 for branch, heads in remotemap.items():
267 for branch, heads in remotemap.items():
246 # don't add head info about branches which we don't have locally
268 # don't add head info about branches which we don't have locally
247 if branch not in branches:
269 if branch not in branches:
248 continue
270 continue
249 known = []
271 known = []
250 unsynced = []
272 unsynced = []
251 for h in heads:
273 for h in heads:
252 if knownnode(h):
274 if knownnode(h):
253 known.append(h)
275 known.append(h)
254 else:
276 else:
255 unsynced.append(h)
277 unsynced.append(h)
256 headssum[branch] = (known, list(known), unsynced)
278 headssum[branch] = (known, list(known), unsynced)
257
279
258 # B. add new branch data
280 # B. add new branch data
259 for branch in branches:
281 for branch in branches:
260 if branch not in headssum:
282 if branch not in headssum:
261 headssum[branch] = (None, [], [])
283 headssum[branch] = (None, [], [])
262
284
263 # C. Update newmap with outgoing changes.
285 # C. Update newmap with outgoing changes.
264 # This will possibly add new heads and remove existing ones.
286 # This will possibly add new heads and remove existing ones.
265 newmap = branchmap.remotebranchcache(
287 newmap = branchmap.remotebranchcache(
266 repo,
288 repo,
267 (
289 (
268 (branch, heads[1])
290 (branch, heads[1])
269 for branch, heads in headssum.items()
291 for branch, heads in headssum.items()
270 if heads[0] is not None
292 if heads[0] is not None
271 ),
293 ),
272 )
294 )
273 newmap.update(repo, (ctx.rev() for ctx in missingctx))
295 newmap.update(repo, (ctx.rev() for ctx in missingctx))
274 for branch, newheads in newmap.items():
296 for branch, newheads in newmap.items():
275 headssum[branch][1][:] = newheads
297 headssum[branch][1][:] = newheads
276 for branch, items in headssum.items():
298 for branch, items in headssum.items():
277 for l in items:
299 for l in items:
278 if l is not None:
300 if l is not None:
279 l.sort()
301 l.sort()
280 headssum[branch] = items + ([],)
302 headssum[branch] = items + ([],)
281
303
282 # If there are no obsstore, no post processing are needed.
304 # If there are no obsstore, no post processing are needed.
283 if repo.obsstore:
305 if repo.obsstore:
284 torev = repo.changelog.rev
306 torev = repo.changelog.rev
285 futureheads = {torev(h) for h in outgoing.ancestorsof}
307 futureheads = {torev(h) for h in outgoing.ancestorsof}
286 futureheads |= {torev(h) for h in outgoing.commonheads}
308 futureheads |= {torev(h) for h in outgoing.commonheads}
287 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
309 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
288 for branch, heads in sorted(pycompat.iteritems(headssum)):
310 for branch, heads in sorted(pycompat.iteritems(headssum)):
289 remoteheads, newheads, unsyncedheads, placeholder = heads
311 remoteheads, newheads, unsyncedheads, placeholder = heads
290 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
312 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
291 headssum[branch] = (
313 headssum[branch] = (
292 remoteheads,
314 remoteheads,
293 sorted(result[0]),
315 sorted(result[0]),
294 unsyncedheads,
316 unsyncedheads,
295 sorted(result[1]),
317 sorted(result[1]),
296 )
318 )
297 return headssum
319 return headssum
298
320
299
321
300 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
322 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
301 """Compute branchmapsummary for repo without branchmap support"""
323 """Compute branchmapsummary for repo without branchmap support"""
302
324
303 # 1-4b. old servers: Check for new topological heads.
325 # 1-4b. old servers: Check for new topological heads.
304 # Construct {old,new}map with branch = None (topological branch).
326 # Construct {old,new}map with branch = None (topological branch).
305 # (code based on update)
327 # (code based on update)
306 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
328 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
307 oldheads = sorted(h for h in remoteheads if knownnode(h))
329 oldheads = sorted(h for h in remoteheads if knownnode(h))
308 # all nodes in outgoing.missing are children of either:
330 # all nodes in outgoing.missing are children of either:
309 # - an element of oldheads
331 # - an element of oldheads
310 # - another element of outgoing.missing
332 # - another element of outgoing.missing
311 # - nullrev
333 # - nullrev
312 # This explains why the new head are very simple to compute.
334 # This explains why the new head are very simple to compute.
313 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
335 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
314 newheads = sorted(c.node() for c in r)
336 newheads = sorted(c.node() for c in r)
315 # set some unsynced head to issue the "unsynced changes" warning
337 # set some unsynced head to issue the "unsynced changes" warning
316 if inc:
338 if inc:
317 unsynced = [None]
339 unsynced = [None]
318 else:
340 else:
319 unsynced = []
341 unsynced = []
320 return {None: (oldheads, newheads, unsynced, [])}
342 return {None: (oldheads, newheads, unsynced, [])}
321
343
322
344
323 def _nowarnheads(pushop):
345 def _nowarnheads(pushop):
324 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
346 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
325 repo = pushop.repo.unfiltered()
347 repo = pushop.repo.unfiltered()
326 remote = pushop.remote
348 remote = pushop.remote
327 localbookmarks = repo._bookmarks
349 localbookmarks = repo._bookmarks
328
350
329 with remote.commandexecutor() as e:
351 with remote.commandexecutor() as e:
330 remotebookmarks = e.callcommand(
352 remotebookmarks = e.callcommand(
331 b'listkeys',
353 b'listkeys',
332 {
354 {
333 b'namespace': b'bookmarks',
355 b'namespace': b'bookmarks',
334 },
356 },
335 ).result()
357 ).result()
336
358
337 bookmarkedheads = set()
359 bookmarkedheads = set()
338
360
339 # internal config: bookmarks.pushing
361 # internal config: bookmarks.pushing
340 newbookmarks = [
362 newbookmarks = [
341 localbookmarks.expandname(b)
363 localbookmarks.expandname(b)
342 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
364 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
343 ]
365 ]
344
366
345 for bm in localbookmarks:
367 for bm in localbookmarks:
346 rnode = remotebookmarks.get(bm)
368 rnode = remotebookmarks.get(bm)
347 if rnode and rnode in repo:
369 if rnode and rnode in repo:
348 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
370 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
349 if bookmarks.validdest(repo, rctx, lctx):
371 if bookmarks.validdest(repo, rctx, lctx):
350 bookmarkedheads.add(lctx.node())
372 bookmarkedheads.add(lctx.node())
351 else:
373 else:
352 if bm in newbookmarks and bm not in remotebookmarks:
374 if bm in newbookmarks and bm not in remotebookmarks:
353 bookmarkedheads.add(localbookmarks[bm])
375 bookmarkedheads.add(localbookmarks[bm])
354
376
355 return bookmarkedheads
377 return bookmarkedheads
356
378
357
379
358 def checkheads(pushop):
380 def checkheads(pushop):
359 """Check that a push won't add any outgoing head
381 """Check that a push won't add any outgoing head
360
382
361 raise StateError error and display ui message as needed.
383 raise StateError error and display ui message as needed.
362 """
384 """
363
385
364 repo = pushop.repo.unfiltered()
386 repo = pushop.repo.unfiltered()
365 remote = pushop.remote
387 remote = pushop.remote
366 outgoing = pushop.outgoing
388 outgoing = pushop.outgoing
367 remoteheads = pushop.remoteheads
389 remoteheads = pushop.remoteheads
368 newbranch = pushop.newbranch
390 newbranch = pushop.newbranch
369 inc = bool(pushop.incoming)
391 inc = bool(pushop.incoming)
370
392
371 # Check for each named branch if we're creating new remote heads.
393 # Check for each named branch if we're creating new remote heads.
372 # To be a remote head after push, node must be either:
394 # To be a remote head after push, node must be either:
373 # - unknown locally
395 # - unknown locally
374 # - a local outgoing head descended from update
396 # - a local outgoing head descended from update
375 # - a remote head that's known locally and not
397 # - a remote head that's known locally and not
376 # ancestral to an outgoing head
398 # ancestral to an outgoing head
377 if remoteheads == [repo.nullid]:
399 if remoteheads == [repo.nullid]:
378 # remote is empty, nothing to check.
400 # remote is empty, nothing to check.
379 return
401 return
380
402
381 if remote.capable(b'branchmap'):
403 if remote.capable(b'branchmap'):
382 headssum = _headssummary(pushop)
404 headssum = _headssummary(pushop)
383 else:
405 else:
384 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
406 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
385 pushop.pushbranchmap = headssum
407 pushop.pushbranchmap = headssum
386 newbranches = [
408 newbranches = [
387 branch for branch, heads in headssum.items() if heads[0] is None
409 branch for branch, heads in headssum.items() if heads[0] is None
388 ]
410 ]
389 # 1. Check for new branches on the remote.
411 # 1. Check for new branches on the remote.
390 if newbranches and not newbranch: # new branch requires --new-branch
412 if newbranches and not newbranch: # new branch requires --new-branch
391 branchnames = b', '.join(sorted(newbranches))
413 branchnames = b', '.join(sorted(newbranches))
392 # Calculate how many of the new branches are closed branches
414 # Calculate how many of the new branches are closed branches
393 closedbranches = set()
415 closedbranches = set()
394 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
416 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
395 if isclosed:
417 if isclosed:
396 closedbranches.add(tag)
418 closedbranches.add(tag)
397 closedbranches = closedbranches & set(newbranches)
419 closedbranches = closedbranches & set(newbranches)
398 if closedbranches:
420 if closedbranches:
399 errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
421 errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
400 branchnames,
422 branchnames,
401 len(closedbranches),
423 len(closedbranches),
402 )
424 )
403 else:
425 else:
404 errmsg = _(b"push creates new remote branches: %s") % branchnames
426 errmsg = _(b"push creates new remote branches: %s") % branchnames
405 hint = _(b"use 'hg push --new-branch' to create new remote branches")
427 hint = _(b"use 'hg push --new-branch' to create new remote branches")
406 raise error.StateError(errmsg, hint=hint)
428 raise error.StateError(errmsg, hint=hint)
407
429
408 # 2. Find heads that we need not warn about
430 # 2. Find heads that we need not warn about
409 nowarnheads = _nowarnheads(pushop)
431 nowarnheads = _nowarnheads(pushop)
410
432
411 # 3. Check for new heads.
433 # 3. Check for new heads.
412 # If there are more heads after the push than before, a suitable
434 # If there are more heads after the push than before, a suitable
413 # error message, depending on unsynced status, is displayed.
435 # error message, depending on unsynced status, is displayed.
414 errormsg = None
436 errormsg = None
415 for branch, heads in sorted(pycompat.iteritems(headssum)):
437 for branch, heads in sorted(pycompat.iteritems(headssum)):
416 remoteheads, newheads, unsyncedheads, discardedheads = heads
438 remoteheads, newheads, unsyncedheads, discardedheads = heads
417 # add unsynced data
439 # add unsynced data
418 if remoteheads is None:
440 if remoteheads is None:
419 oldhs = set()
441 oldhs = set()
420 else:
442 else:
421 oldhs = set(remoteheads)
443 oldhs = set(remoteheads)
422 oldhs.update(unsyncedheads)
444 oldhs.update(unsyncedheads)
423 dhs = None # delta heads, the new heads on branch
445 dhs = None # delta heads, the new heads on branch
424 newhs = set(newheads)
446 newhs = set(newheads)
425 newhs.update(unsyncedheads)
447 newhs.update(unsyncedheads)
426 if unsyncedheads:
448 if unsyncedheads:
427 if None in unsyncedheads:
449 if None in unsyncedheads:
428 # old remote, no heads data
450 # old remote, no heads data
429 heads = None
451 heads = None
430 else:
452 else:
431 heads = scmutil.nodesummaries(repo, unsyncedheads)
453 heads = scmutil.nodesummaries(repo, unsyncedheads)
432 if heads is None:
454 if heads is None:
433 repo.ui.status(
455 repo.ui.status(
434 _(b"remote has heads that are not known locally\n")
456 _(b"remote has heads that are not known locally\n")
435 )
457 )
436 elif branch is None:
458 elif branch is None:
437 repo.ui.status(
459 repo.ui.status(
438 _(b"remote has heads that are not known locally: %s\n")
460 _(b"remote has heads that are not known locally: %s\n")
439 % heads
461 % heads
440 )
462 )
441 else:
463 else:
442 repo.ui.status(
464 repo.ui.status(
443 _(
465 _(
444 b"remote has heads on branch '%s' that are "
466 b"remote has heads on branch '%s' that are "
445 b"not known locally: %s\n"
467 b"not known locally: %s\n"
446 )
468 )
447 % (branch, heads)
469 % (branch, heads)
448 )
470 )
449 if remoteheads is None:
471 if remoteheads is None:
450 if len(newhs) > 1:
472 if len(newhs) > 1:
451 dhs = list(newhs)
473 dhs = list(newhs)
452 if errormsg is None:
474 if errormsg is None:
453 errormsg = (
475 errormsg = (
454 _(b"push creates new branch '%s' with multiple heads")
476 _(b"push creates new branch '%s' with multiple heads")
455 % branch
477 % branch
456 )
478 )
457 hint = _(
479 hint = _(
458 b"merge or"
480 b"merge or"
459 b" see 'hg help push' for details about"
481 b" see 'hg help push' for details about"
460 b" pushing new heads"
482 b" pushing new heads"
461 )
483 )
462 elif len(newhs) > len(oldhs):
484 elif len(newhs) > len(oldhs):
463 # remove bookmarked or existing remote heads from the new heads list
485 # remove bookmarked or existing remote heads from the new heads list
464 dhs = sorted(newhs - nowarnheads - oldhs)
486 dhs = sorted(newhs - nowarnheads - oldhs)
465 if dhs:
487 if dhs:
466 if errormsg is None:
488 if errormsg is None:
467 if branch not in (b'default', None):
489 if branch not in (b'default', None):
468 errormsg = _(
490 errormsg = _(
469 b"push creates new remote head %s on branch '%s'"
491 b"push creates new remote head %s on branch '%s'"
470 ) % (
492 ) % (
471 short(dhs[0]),
493 short(dhs[0]),
472 branch,
494 branch,
473 )
495 )
474 elif repo[dhs[0]].bookmarks():
496 elif repo[dhs[0]].bookmarks():
475 errormsg = _(
497 errormsg = _(
476 b"push creates new remote head %s "
498 b"push creates new remote head %s "
477 b"with bookmark '%s'"
499 b"with bookmark '%s'"
478 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
500 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
479 else:
501 else:
480 errormsg = _(b"push creates new remote head %s") % short(
502 errormsg = _(b"push creates new remote head %s") % short(
481 dhs[0]
503 dhs[0]
482 )
504 )
483 if unsyncedheads:
505 if unsyncedheads:
484 hint = _(
506 hint = _(
485 b"pull and merge or"
507 b"pull and merge or"
486 b" see 'hg help push' for details about"
508 b" see 'hg help push' for details about"
487 b" pushing new heads"
509 b" pushing new heads"
488 )
510 )
489 else:
511 else:
490 hint = _(
512 hint = _(
491 b"merge or"
513 b"merge or"
492 b" see 'hg help push' for details about"
514 b" see 'hg help push' for details about"
493 b" pushing new heads"
515 b" pushing new heads"
494 )
516 )
495 if branch is None:
517 if branch is None:
496 repo.ui.note(_(b"new remote heads:\n"))
518 repo.ui.note(_(b"new remote heads:\n"))
497 else:
519 else:
498 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
520 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
499 for h in dhs:
521 for h in dhs:
500 repo.ui.note(b" %s\n" % short(h))
522 repo.ui.note(b" %s\n" % short(h))
501 if errormsg:
523 if errormsg:
502 raise error.StateError(errormsg, hint=hint)
524 raise error.StateError(errormsg, hint=hint)
503
525
504
526
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    ``pushop``: the in-progress push operation; only ``pushop.repo`` is read.
    ``futurecommon``: container of *revision numbers* expected to be common
        with the remote after the push (membership-tested via ``torev(n)``).
    ``candidate_newhs``: iterable of candidate new head *nodes*.

    Returns (newheads, discarded_heads) tuple:
    - newheads: candidate heads that will still be visible remotely,
    - discarded_heads: heads of branches fully obsoleted by pushed markers.
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    # obsolescence markers may reference hidden changesets, so work unfiltered
    unfi = repo.unfiltered()
    torev = unfi.changelog.index.get_rev  # node -> rev (None if unknown)
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = lambda r: getphase(unfi, r) == public
    ispushed = lambda n: torev(n) in futurecommon
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set()  # final set of new heads
    discarded = set()  # new head of fully replaced branch

    # split candidates by whether the local repo knows them at all
    localcandidate = set()  # candidate heads known locally
    unknownheads = set()  # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                # surprising situation: we have markers about a changeset we
                # do not know; surface it in debug output for investigation
                msg = (
                    b'checkheads: remote head unknown locally has'
                    b' local marker: %s\n'
                )
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    # (a single known head cannot be a "branch replacement", so no marker
    # analysis is needed; unknown heads are always kept as-is)
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    obsrevs = obsolete.getrevs(unfi, b'obsolete')
    futurenonobsolete = frozenset(futurecommon) - obsrevs

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        r = torev(nh)
        current_branch = unfi[nh].branch()
        # run this check early to skip the evaluation of the whole branch
        if ispublic(r) or r not in obsrevs:
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs(
            b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
        )

        # restrict to changesets on the same named branch as the head
        branchnodes = []
        for r in branchrevs:
            c = unfi[r]
            if c.branch() == current_branch:
                branchnodes.append(c.node())

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (
            any(ispublic(r) for r in branchrevs)
            or any(torev(n) in futurenonobsolete for n in branchnodes)
            or any(not hasoutmarker(n) for n in branchnodes)
        ):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads.  However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
594
616
595
617
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    succs_map = obsstore.successors
    pending = [node]
    visited = {node}
    while pending:
        candidate = pending.pop()
        if ispushed(candidate):
            return True
        # marker fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in succs_map.get(candidate, ()):
            followups = marker[1]  # successors
            if not followups:
                # prune marker: walk through the parents instead
                followups = marker[5] or ()
            for nxt in followups:
                if nxt in visited:
                    continue
                visited.add(nxt)
                pending.append(nxt)
    return False
General Comments 0
You need to be logged in to leave comments. Login now