##// END OF EJS Templates
index: use `index.get_rev` in `discovery._postprocessobsolete`...
marmoute -
r43960:65d67702 default
parent child Browse files
Show More
@@ -1,593 +1,593 @@
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 setdiscovery,
26 setdiscovery,
27 treediscovery,
27 treediscovery,
28 util,
28 util,
29 )
29 )
30
30
31
31
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof" if not None, restricts the discovery to a subset defined by
    these nodes. Changesets outside of this set won't be considered (and
    won't appear in "common").

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    # Old servers without the getbundle capability use tree-walking discovery.
    if not remote.capable(b'getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        # Do not use the nodemap here: it is not filtered yet.
        hasnode = repo.changelog.hasnode
        if all(hasnode(h) for h in heads):
            # Every requested head is already known locally; nothing incoming.
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui,
        repo,
        remote,
        abortwhenunrelated=not force,
        ancestorsof=ancestorsof,
    )
    return (list(common), anyinc, heads or list(srvheads))
71
71
72
72
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(
        self, repo, commonheads=None, missingheads=None, missingroots=None
    ):
        # commonheads and missingroots are two mutually exclusive ways of
        # describing the same boundary: at least one must be left unset.
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            # Derive commonheads from the parents of the missing roots.
            bases = []
            for root in missingroots:
                bases.extend(p for p in cl.parents(root) if p != nullid)
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discovery bases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            includedset = set(csets)
            missingheads = heads
            commonheads = [node for node in bases if node not in includedset]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        # revlog used for the lazy common/missing computation below
        self._revlog = cl
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # Fill both caches in a single findcommonmissing pass.
        self._common, self._missing = self._revlog.findcommonmissing(
            self.commonheads, self.missingheads
        )

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
133
133
134
134
def findcommonoutgoing(
    repo, other, onlyheads=None, force=False, commoninc=None, portable=False
):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # Start from an empty outgoing object and fill it in below.
    out = outgoing(repo, None, None)

    # Reuse a previously computed incoming result when the caller has one.
    if commoninc is None:
        commoninc = findcommonincoming(
            repo, other, force=force, ancestorsof=onlyheads
        )
    out.commonheads, _anyinc, _srvheads = commoninc

    # Secret changesets or obsolescence markers may force exclusions.
    mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
    if not mayexclude:
        out.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        out.missingheads = repo.filtered(b"served").heads()
        out.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        out._common, allmissing = repo.changelog.findcommonmissing(
            out.commonheads, onlyheads
        )
        out._missing = missing = []
        out.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # nothing was excluded; the requested heads stand as-is
            missingheads = onlyheads
        else:  # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        out.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        out._computecommonmissing()
        cl = repo.changelog
        missingrevs = {cl.rev(n) for n in out._missing}
        out._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(out.commonheads)
        out.missingheads = [h for h in out.missingheads if h not in commonheads]

    return out
197
197
198
198
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Collect the set of branches touched by the outgoing changesets.
    branches = set()
    for node in outgoing.missing:
        ctx = repo[node]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    with remote.commandexecutor() as e:
        remotemap = e.callcommand(b'branchmap', {}).result()

    knownnode = cl.hasnode  # do not use nodemap until it is filtered
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in pycompat.iteritems(remotemap):
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        known = []
        unsynced = []
        for head in heads:
            if knownnode(head):
                known.append(head)
            else:
                unsynced.append(head)
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache(
        (branch, heads[1])
        for branch, heads in pycompat.iteritems(headssum)
        if heads[0] is not None
    )
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in pycompat.iteritems(newmap):
        headssum[branch][1][:] = newheads
    # Sort all head lists in place, then append the (empty) discarded slot.
    for branch, items in pycompat.iteritems(headssum):
        for headlist in items:
            if headlist is not None:
                headlist.sort()
        headssum[branch] = items + ([],)

    # Without an obsstore there is nothing to post-process.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = {torev(h) for h in outgoing.missingheads}
        futureheads |= {torev(h) for h in outgoing.commonheads}
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(pycompat.iteritems(headssum)):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (
                remoteheads,
                sorted(result[0]),
                unsyncedheads,
                sorted(result[1]),
            )
    return headssum
280
280
281
281
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    revset = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(ctx.node() for ctx in revset)
    # set some unsynced head to issue the "unsynced changes" warning
    unsynced = [None] if inc else []
    return {None: (oldheads, newheads, unsynced, [])}
303
303
304
304
def _nowarnheads(pushop):
    """Return the set of local head nodes that should not trigger the
    new-remote-head warning (heads carried by pushed bookmarks)."""
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks

    with remote.commandexecutor() as e:
        remotebookmarks = e.callcommand(
            b'listkeys', {b'namespace': b'bookmarks',}
        ).result()

    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [
        localbookmarks.expandname(b)
        for b in pushop.ui.configlist(b'bookmarks', b'pushing')
    ]

    for bm in localbookmarks:
        remotenode = remotebookmarks.get(bm)
        if remotenode and remotenode in repo:
            # Bookmark exists on both sides: exempt the local head when the
            # remote position is a valid predecessor of it.
            lctx = repo[localbookmarks[bm]]
            rctx = repo[remotenode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        elif bm in newbookmarks and bm not in remotebookmarks:
            # Bookmark is being pushed for the first time.
            bookmarkedheads.add(localbookmarks[bm])

    return bookmarkedheads
335
335
336
336
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable(b'branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    newbranches = [
        branch
        for branch, heads in pycompat.iteritems(headssum)
        if heads[0] is None
    ]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = b', '.join(sorted(newbranches))
        # Count how many of the new branches are closed locally, to make the
        # error message more informative.
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = closedbranches & set(newbranches)
        if closedbranches:
            errmsg = _(b"push creates new remote branches: %s (%d closed)!") % (
                branchnames,
                len(closedbranches),
            )
        else:
            errmsg = _(b"push creates new remote branches: %s!") % branchnames
        hint = _(b"use 'hg push --new-branch' to create new remote branches")
        raise error.Abort(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(pycompat.iteritems(headssum)):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        oldhs = set() if remoteheads is None else set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally\n")
                )
            elif branch is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally: %s\n")
                    % heads
                )
            else:
                repo.ui.status(
                    _(
                        b"remote has heads on branch '%s' that are "
                        b"not known locally: %s\n"
                    )
                    % (branch, heads)
                )
        if remoteheads is None:
            # The branch is new on the remote; more than one head is an error.
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (
                        _(b"push creates new branch '%s' with multiple heads")
                        % branch
                    )
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in (b'default', None):
                    errormsg = _(
                        b"push creates new remote head %s on branch '%s'!"
                    ) % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _(
                        b"push creates new remote head %s "
                        b"with bookmark '%s'!"
                    ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _(b"push creates new remote head %s!") % short(
                        dhs[0]
                    )
                if unsyncedheads:
                    hint = _(
                        b"pull and merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
                else:
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
            if branch is None:
                repo.ui.note(_(b"new remote heads:\n"))
            else:
                repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note(b" %s\n" % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
481
481
482
482
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    pushop: the push operation object (provides ``repo`` and its ``ui``).
    futurecommon: set of revision numbers that will be common with the remote
        after the push.
    candidate_newhs: iterable of candidate new head nodes to filter.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    # use the index directly: `get_rev` returns None for unknown nodes
    torev = unfi.changelog.index.get_rev
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = lambda r: getphase(unfi, r) == public
    ispushed = lambda n: torev(n) in futurecommon
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set()  # final set of new heads
    discarded = set()  # new head of fully replaced branch

    localcandidate = set()  # candidate heads known locally
    unknownheads = set()  # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                msg = (
                    b'checkheads: remote head unknown locally has'
                    b' local marker: %s\n'
                )
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        # hoisted: the original looked the rev up twice for the same node
        nhrev = torev(nh)
        # run this check early to skip the evaluation of the whole branch
        if nhrev in futurecommon or ispublic(nhrev):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs(
            b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
        )
        branchnodes = [tonode(r) for r in branchrevs]

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (
            any(ispublic(r) for r in branchrevs)
            or any(torev(n) in futurecommon for n in branchnodes)
            or any(not hasoutmarker(n) for n in branchnodes)
        ):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads. However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
563
563
564
564
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    getmarkers = obsstore.successors.get
    pending = [node]
    visited = {node}
    while pending:
        current = pending.pop()
        if ispushed(current):
            return True
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in getmarkers(current, ()):
            # follow successors; a prune marker (no successors) falls back
            # to the pruned changeset's recorded parents, if any
            targets = marker[1] or marker[5] or ()
            for nxt in targets:
                if nxt not in visited:
                    visited.add(nxt)
                    pending.append(nxt)
    return False
General Comments 0
You need to be logged in to leave comments. Login now