outgoing: add a simple fastpath when there is no common...
Author: marmoute
Changeset: r52488:3a6fae3b (branch: default)
diff --git a/mercurial/discovery.py b/mercurial/discovery.py
--- a/mercurial/discovery.py
+++ b/mercurial/discovery.py
@@ -1,646 +1,658 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 import functools

 from .i18n import _
 from .node import (
     hex,
     short,
 )

 from . import (
     bookmarks,
     branchmap,
     error,
     node as nodemod,
     obsolete,
     phases,
     pycompat,
     scmutil,
     setdiscovery,
     treediscovery,
     util,
 )


 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.

     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.
     "ancestorsof", if not None, restricts the discovery to a subset defined by
     these nodes. Changesets outside of this set won't be considered (but may
     still appear in "common").

     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".

     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """

     if not remote.capable(b'getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)

     if heads:
         knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
         if all(knownnode(h) for h in heads):
             return (heads, False, heads)

     res = setdiscovery.findcommonheads(
         repo.ui,
         repo,
         remote,
         abortwhenunrelated=not force,
         ancestorsof=ancestorsof,
     )
     common, anyinc, srvheads = res
     if heads and not anyinc:
         # server could be lying on the advertised heads
         has_node = repo.changelog.hasnode
         anyinc = any(not has_node(n) for n in heads)
     return (list(common), anyinc, heads or list(srvheads))


 class outgoing:
     """Represents the result of a findcommonoutgoing() call.

     Members:

     ancestorsof is a list of the nodes whose ancestors are included in the
     outgoing operation.

     missing is a list of those ancestors of ancestorsof that are present in
     local but not in remote.

     common is a set containing revs common between the local and the remote
     repository (at least all of those that are ancestors of ancestorsof).

     commonheads is the list of heads of common.

     excluded is the list of missing changesets that shouldn't be sent
     remotely.

     Some members are computed on demand from the heads, unless provided upfront
     by discovery."""

     def __init__(
         self, repo, commonheads=None, ancestorsof=None, missingroots=None
     ):
         # at most one of them must not be set
         if commonheads is not None and missingroots is not None:
             m = 'commonheads and missingroots arguments are mutually exclusive'
             raise error.ProgrammingError(m)
         cl = repo.changelog
+        unfi = repo.unfiltered()
+        ucl = unfi.changelog
+        to_node = ucl.node
         missing = None
         common = None
+        arg_anc = ancestorsof
         if ancestorsof is None:
             ancestorsof = cl.heads()
-        if missingroots:
+
+        # XXX-perf: do we need all this to be node-list? They would be simpler
+        # as rev-num sets (and smartset)
+        if missingroots == [nodemod.nullrev] or missingroots == []:
+            commonheads = [repo.nullid]
+            common = set()
+            if arg_anc is None:
+                missing = [to_node(r) for r in cl]
+            else:
+                missing_rev = repo.revs('::%ln', missingroots, ancestorsof)
+                missing = [to_node(r) for r in missing_rev]
+        elif missingroots is not None:
             # TODO remove call to nodesbetween.
             missing_rev = repo.revs('%ln::%ln', missingroots, ancestorsof)
-            unfi = repo.unfiltered()
-            ucl = unfi.changelog
-            to_node = ucl.node
             ancestorsof = [to_node(r) for r in ucl.headrevs(missing_rev)]
             parent_revs = ucl.parentrevs
             common_legs = set()
             for r in missing_rev:
                 p1, p2 = parent_revs(r)
                 if p1 not in missing_rev:
                     common_legs.add(p1)
                 if p2 not in missing_rev:
                     common_legs.add(p2)
             common_legs.discard(nodemod.nullrev)
             if not common_legs:
                 commonheads = [repo.nullid]
                 common = set()
             else:
                 commonheads_revs = unfi.revs(
                     'heads(%ld::%ld)',
                     common_legs,
                     common_legs,
                 )
                 commonheads = [to_node(r) for r in commonheads_revs]
                 common = ucl.ancestors(commonheads_revs, inclusive=True)
             missing = [to_node(r) for r in missing_rev]
         elif not commonheads:
             commonheads = [repo.nullid]
         self.commonheads = commonheads
         self.ancestorsof = ancestorsof
         self._revlog = cl
         self._common = common
         self._missing = missing
         self.excluded = []

     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(
             self.commonheads, self.ancestorsof
         )
         self._common, self._missing = sets

     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common

     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing


 def findcommonoutgoing(
     repo, other, onlyheads=None, force=False, commoninc=None, portable=False
 ):
     """Return an outgoing instance to identify the nodes present in repo but
     not in other.

     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.

     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.

     If portable is given, compute more conservative common and ancestorsof,
     to make bundles created from the instance more portable."""
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)

     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(
             repo, other, force=force, ancestorsof=onlyheads
         )
     og.commonheads, _any, _hds = commoninc

     # compute outgoing
     mayexclude = phases.hassecret(repo) or repo.obsstore
     if not mayexclude:
         og.ancestorsof = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.ancestorsof = repo.filtered(b"served").heads()
         og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             ancestorsof = onlyheads
         else:  # update missing heads
             to_rev = repo.changelog.index.rev
             to_node = repo.changelog.node
             excluded_revs = [to_rev(r) for r in excluded]
             onlyheads_revs = [to_rev(r) for r in onlyheads]
             new_heads = phases.new_heads(repo, onlyheads_revs, excluded_revs)
             ancestorsof = [to_node(r) for r in new_heads]
         og.ancestorsof = ancestorsof
     if portable:
         # recompute common and ancestorsof as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = {cl.rev(n) for n in og._missing}
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]

     return og


 def _headssummary(pushop):
     """compute a summary of branch and heads status before and after push

     return {'branch': ([remoteheads], [newheads],
                        [unsyncedheads], [discardedheads])} mapping

     - branch: the branch name,
     - remoteheads: the list of remote heads known locally
                    None if the branch is new,
     - newheads: the new remote heads (known locally) with outgoing pushed,
     - unsyncedheads: the list of remote heads unknown locally,
     - discardedheads: the list of heads made obsolete by the push.
     """
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     cl = repo.changelog
     headssum = {}
     missingctx = set()
     # A. Create set of branches involved in the push.
     branches = set()
     for n in outgoing.missing:
         ctx = repo[n]
         missingctx.add(ctx)
         branches.add(ctx.branch())

     with remote.commandexecutor() as e:
         remotemap = e.callcommand(b'branchmap', {}).result()

     knownnode = cl.hasnode  # do not use nodemap until it is filtered
     # A. register remote heads of branches which are in outgoing set
     for branch, heads in remotemap.items():
         # don't add head info about branches which we don't have locally
         if branch not in branches:
             continue
         known = []
         unsynced = []
         for h in heads:
             if knownnode(h):
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)

     # B. add new branch data
     for branch in branches:
         if branch not in headssum:
             headssum[branch] = (None, [], [])

     # C. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.remotebranchcache(
         repo,
         (
             (branch, heads[1])
             for branch, heads in headssum.items()
             if heads[0] is not None
         ),
     )
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in newmap.items():
         headssum[branch][1][:] = newheads
     for branch, items in headssum.items():
         for l in items:
             if l is not None:
                 l.sort()
         headssum[branch] = items + ([],)

     # If there is no obsstore, no post processing is needed.
     if repo.obsstore:
         torev = repo.changelog.rev
         futureheads = {torev(h) for h in outgoing.ancestorsof}
         futureheads |= {torev(h) for h in outgoing.commonheads}
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(pycompat.iteritems(headssum)):
             remoteheads, newheads, unsyncedheads, placeholder = heads
             result = _postprocessobsolete(pushop, allfuturecommon, newheads)
             headssum[branch] = (
                 remoteheads,
                 sorted(result[0]),
                 unsyncedheads,
                 sorted(result[1]),
             )
     return headssum


 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""

     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
     knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
     oldheads = sorted(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new heads are very simple to compute.
     r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = sorted(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
         unsynced = [None]
     else:
         unsynced = []
     return {None: (oldheads, newheads, unsynced, [])}


 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     localbookmarks = repo._bookmarks

     with remote.commandexecutor() as e:
         remotebookmarks = e.callcommand(
             b'listkeys',
             {
                 b'namespace': b'bookmarks',
             },
         ).result()

     bookmarkedheads = set()

     # internal config: bookmarks.pushing
     newbookmarks = [
         localbookmarks.expandname(b)
         for b in pushop.ui.configlist(b'bookmarks', b'pushing')
     ]

     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
             if bm in newbookmarks and bm not in remotebookmarks:
                 bookmarkedheads.add(localbookmarks[bm])

     return bookmarkedheads


 def checkheads(pushop):
     """Check that a push won't add any outgoing head

     raise a StateError and display a ui message as needed.
     """

     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     remoteheads = pushop.remoteheads
     newbranch = pushop.newbranch
     inc = bool(pushop.incoming)

     # Check for each named branch if we're creating new remote heads.
     # To be a remote head after push, node must be either:
     # - unknown locally
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
     if remoteheads == [repo.nullid]:
         # remote is empty, nothing to check.
         return

     if remote.capable(b'branchmap'):
         headssum = _headssummary(pushop)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     pushop.pushbranchmap = headssum
     newbranches = [
         branch for branch, heads in headssum.items() if heads[0] is None
     ]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch:  # new branch requires --new-branch
         branchnames = b', '.join(sorted(newbranches))
         # Calculate how many of the new branches are closed branches
         closedbranches = set()
         for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
             if isclosed:
                 closedbranches.add(tag)
         closedbranches = closedbranches & set(newbranches)
         if closedbranches:
             errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
                 branchnames,
                 len(closedbranches),
             )
         else:
             errmsg = _(b"push creates new remote branches: %s") % branchnames
         hint = _(b"use 'hg push --new-branch' to create new remote branches")
         raise error.StateError(errmsg, hint=hint)

     # 2. Find heads that we need not warn about
     nowarnheads = _nowarnheads(pushop)

     # 3. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     errormsg = None
     for branch, heads in sorted(pycompat.iteritems(headssum)):
         remoteheads, newheads, unsyncedheads, discardedheads = heads
         # add unsynced data
         if remoteheads is None:
             oldhs = set()
         else:
             oldhs = set(remoteheads)
         oldhs.update(unsyncedheads)
         dhs = None  # delta heads, the new heads on branch
         newhs = set(newheads)
         newhs.update(unsyncedheads)
         if unsyncedheads:
             if None in unsyncedheads:
                 # old remote, no heads data
                 heads = None
             else:
                 heads = scmutil.nodesummaries(repo, unsyncedheads)
             if heads is None:
                 repo.ui.status(
                     _(b"remote has heads that are not known locally\n")
                 )
             elif branch is None:
                 repo.ui.status(
                     _(b"remote has heads that are not known locally: %s\n")
                     % heads
                 )
             else:
                 repo.ui.status(
                     _(
                         b"remote has heads on branch '%s' that are "
                         b"not known locally: %s\n"
                     )
                     % (branch, heads)
                 )
         if remoteheads is None:
             if len(newhs) > 1:
                 dhs = list(newhs)
                 if errormsg is None:
                     errormsg = (
                         _(b"push creates new branch '%s' with multiple heads")
                         % branch
                     )
                     hint = _(
                         b"merge or"
                         b" see 'hg help push' for details about"
                         b" pushing new heads"
                     )
         elif len(newhs) > len(oldhs):
             # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - nowarnheads - oldhs)
         if dhs:
             if errormsg is None:
                 if branch not in (b'default', None):
                     errormsg = _(
                         b"push creates new remote head %s on branch '%s'"
                     ) % (
                         short(dhs[0]),
                         branch,
                     )
                 elif repo[dhs[0]].bookmarks():
                     errormsg = _(
                         b"push creates new remote head %s "
                         b"with bookmark '%s'"
                     ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                 else:
                     errormsg = _(b"push creates new remote head %s") % short(
                         dhs[0]
                     )
                 if unsyncedheads:
                     hint = _(
                         b"pull and merge or"
                         b" see 'hg help push' for details about"
                         b" pushing new heads"
                     )
                 else:
                     hint = _(
                         b"merge or"
                         b" see 'hg help push' for details about"
                         b" pushing new heads"
                     )
             if branch is None:
                 repo.ui.note(_(b"new remote heads:\n"))
             else:
                 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
             for h in dhs:
                 repo.ui.note(b" %s\n" % short(h))
     if errormsg:
         raise error.StateError(errormsg, hint=hint)


 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
     """post process the list of new heads with obsolescence information

     Exists as a sub-function to contain the complexity and allow extensions to
     experiment with smarter logic.

     Returns (newheads, discarded_heads) tuple
     """
     # known issue
     #
     # * We "silently" skip processing on all changesets unknown locally
     #
     # * if <nh> is public on the remote, it won't be affected by obsolete
     #   markers and a new head is created

     # define various utilities and containers
     repo = pushop.repo
     unfi = repo.unfiltered()
     torev = unfi.changelog.index.get_rev
     public = phases.public
     getphase = unfi._phasecache.phase
     ispublic = lambda r: getphase(unfi, r) == public
     ispushed = lambda n: torev(n) in futurecommon
     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
     successorsmarkers = unfi.obsstore.successors
     newhs = set()  # final set of new heads
     discarded = set()  # new head of fully replaced branch

     localcandidate = set()  # candidate heads known locally
     unknownheads = set()  # candidate heads unknown locally
     for h in candidate_newhs:
         if h in unfi:
             localcandidate.add(h)
         else:
             if successorsmarkers.get(h) is not None:
                 msg = (
                     b'checkheads: remote head unknown locally has'
                     b' local marker: %s\n'
                 )
                 repo.ui.debug(msg % hex(h))
             unknownheads.add(h)

     # fast path the simple case
     if len(localcandidate) == 1:
         return unknownheads | set(candidate_newhs), set()

     obsrevs = obsolete.getrevs(unfi, b'obsolete')
     futurenonobsolete = frozenset(futurecommon) - obsrevs

     # actually process branch replacement
     while localcandidate:
         nh = localcandidate.pop()
         r = torev(nh)
         current_branch = unfi[nh].branch()
         # run this check early to skip the evaluation of the whole branch
         if ispublic(r) or r not in obsrevs:
             newhs.add(nh)
             continue

         # Get all revs/nodes on the branch exclusive to this head
         # (already filtered heads are "ignored")
         branchrevs = unfi.revs(
             b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
         )

         branchnodes = []
         for r in branchrevs:
             c = unfi[r]
             if c.branch() == current_branch:
                 branchnodes.append(c.node())

         # The branch won't be hidden on the remote if
         # * any part of it is public,
         # * any part of it is considered part of the result by previous logic,
         # * if we have no markers to push to obsolete it.
         if (
             any(ispublic(r) for r in branchrevs)
             or any(torev(n) in futurenonobsolete for n in branchnodes)
             or any(not hasoutmarker(n) for n in branchnodes)
         ):
             newhs.add(nh)
         else:
             # note: there is a corner case if there is a merge in the branch.
             # we might end up with -more- heads. However, these heads are not
             # "added" by the push, but more by the "removal" on the remote, so
             # I think it is okay to ignore them.
             discarded.add(nh)
     newhs |= unknownheads
     return newhs, discarded


 def pushingmarkerfor(obsstore, ispushed, node):
     """true if some markers are to be pushed for node

     We cannot just look into the pushed obsmarkers from the pushop because
     discovery might have filtered relevant markers. In addition, listing all
     markers relevant to all changesets in the pushed set would be too expensive
     (O(len(repo)))

     (note: there are caching opportunities in this function, but it would
     require a two-dimensional stack.)
     """
     successorsmarkers = obsstore.successors
     stack = [node]
     seen = set(stack)
     while stack:
         current = stack.pop()
         if ispushed(current):
             return True
         markers = successorsmarkers.get(current, ())
         # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
         for m in markers:
             nexts = m[1]  # successors
             if not nexts:  # this is a prune marker
                 nexts = m[5] or ()  # parents
             for n in nexts:
                 if n not in seen:
                     seen.add(n)
                     stack.append(n)
     return False
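
Example: the new fastpath. A minimal sketch (not part of the changeset) of how
the "missingroots == [] or [nullrev]" branch added above is exercised: when
there is no common ancestry at all, every changeset in the changelog is
missing and commonheads collapses to nullid, with no graph walk needed. This
assumes a Python environment where Mercurial is importable, run from inside
any local repository.

    from mercurial import hg, ui as uimod
    from mercurial.discovery import outgoing

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')  # any local repository

    # No common roots at all: the whole repository is outgoing.
    og = outgoing(repo, missingroots=[])
    assert og.commonheads == [repo.nullid]
    print(len(og.missing), 'changesets would be sent')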
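Example: computing outgoing against a peer. A hedged sketch of the
findcommonoutgoing() API documented above; the peer URL is only illustrative,
and any reachable remote (or a local path) would do.

    from mercurial import discovery, hg, ui as uimod
    from mercurial.node import short

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')
    other = hg.peer(repo, {}, b'https://www.mercurial-scm.org/repo/hg')

    og = discovery.findcommonoutgoing(repo, other)
    print('outgoing changesets:', len(og.missing))
    print('common heads:', [short(n) for n in og.commonheads])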
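Example: the marker walk in pushingmarkerfor(). A toy, self-contained
re-implementation of the stack-based traversal, to make the
successors-then-parents (prune) fallback explicit; the marker tuple below is
made up for illustration and follows the ('prec', 'succs', 'flag', 'meta',
'date', 'parents') layout noted in the code.

    def pushing_marker_for(successors, ispushed, node):
        # Walk successor markers from `node`; a prune marker (no successors)
        # falls back to its recorded parents, mirroring the real function.
        stack = [node]
        seen = set(stack)
        while stack:
            current = stack.pop()
            if ispushed(current):
                return True
            for m in successors.get(current, ()):
                nexts = m[1] or (m[5] or ())  # successors, else parents
                for n in nexts:
                    if n not in seen:
                        seen.add(n)
                        stack.append(n)
        return False

    # b'a' was rewritten into b'b'; pretend b'b' is part of the push.
    markers = {b'a': [(b'a', (b'b',), 0, (), (0, 0), None)]}
    print(pushing_marker_for(markers, lambda n: n == b'b', b'a'))  # True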