# Changeset r42402:d0e773ad (branch: default), by Pulkit Goyal:
# discovery: only calculate closed branches if required
# discovery.py - protocol changeset discovery functions
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)

from . import (
    bookmarks,
    branchmap,
    error,
    phases,
    scmutil,
    setdiscovery,
    treediscovery,
    util,
)
29
29
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof" if not None, restrict the discovery to a subset defined by
    these nodes. Changeset outside of this set won't be considered (and
    won't appears in "common")

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    # old servers without getbundle use the legacy tree-walking discovery
    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        knownnode = repo.changelog.hasnode # no nodemap until it is filtered
        # fast path: every requested head already exists locally
        if all(knownnode(h) for h in heads):
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force,
                                       ancestorsof=ancestorsof)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))
65
65
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changeset that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, repo, commonheads=None, missingheads=None,
                 missingroots=None):
        # at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            # derive commonheads from the parents of the missing roots
            discbases = []
            for n in missingroots:
                discbases.extend([p for p in cl.parents(n) if p != nullid])
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            included = set(csets)
            missingheads = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        # changelog used to lazily compute the common/missing sets
        self._revlog = cl
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # fill in _common and _missing from the stored head lists
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
124
124
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force,
                                       ancestorsof=onlyheads)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        # nothing secret or obsolete: outgoing is simply the local heads
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # nothing was excluded: the supplied heads are still the heads
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og
185
185
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Create set of branches involved in the push.
    branches = set()
    for n in outgoing.missing:
        ctx = repo[n]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    with remote.commandexecutor() as e:
        remotemap = e.callcommand('branchmap', {}).result()

    knownnode = cl.hasnode # do not use nodemap until it is filtered
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in remotemap.iteritems():
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        known = []
        unsynced = []
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache((branch, heads[1])
                                 for branch, heads in headssum.iteritems()
                                 if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    for branch, items in headssum.iteritems():
        for l in items:
            if l is not None:
                l.sort()
        # append the (for now empty) discardedheads slot
        headssum[branch] = items + ([],)

    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = set(torev(h) for h in outgoing.missingheads)
        futureheads |= set(torev(h) for h in outgoing.commonheads)
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(headssum.iteritems()):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                sorted(result[1]))
    return headssum
261
261
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new head are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    if inc:
        unsynced = [None]
    else:
        unsynced = []
    return {None: (oldheads, newheads, unsynced, [])}
283
283
def _nowarnheads(pushop):
    """Return the set of local nodes that should not trigger a new-head
    warning.

    Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks

    with remote.commandexecutor() as e:
        remotebookmarks = e.callcommand('listkeys', {
            'namespace': 'bookmarks',
        }).result()

    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            # bookmark exists on both sides: exempt only a valid fast-forward
            lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        else:
            # bookmark is being explicitly pushed and is new on the remote
            if bm in newbookmarks and bm not in remotebookmarks:
                bookmarkedheads.add(localbookmarks[bm])

    return bookmarkedheads
312
312
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        # Calculate how many of the new branches are closed branches
        # (done only here, inside the error path, so that the branchmap
        # iteration is skipped on the common successful-push case)
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = (closedbranches & set(newbranches))
        if closedbranches:
            errmsg = (_("push creates new remote branches: %s (%d closed)!")
                      % (branchnames, len(closedbranches)))
        else:
            errmsg = (_("push creates new remote branches: %s!")% branchnames)
        hint=_("use 'hg push --new-branch' to create new remote branches")
        raise error.Abort(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
433
433
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    ``pushop`` is the in-flight push operation (provides ``repo``).
    ``futurecommon`` is a set of *revisions* expected to be common with the
    remote after the push (membership is tested with ``torev(node)``).
    ``candidate_newhs`` is an iterable of candidate new head *nodes*; heads
    unknown locally are passed through untouched.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    torev = unfi.changelog.nodemap.get
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = (lambda r: getphase(unfi, r) == public)
    # membership in futurecommon is by revision number, hence the torev()
    ispushed = (lambda n: torev(n) in futurecommon)
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set() # final set of new heads
    discarded = set() # new head of fully replaced branch

    localcandidate = set() # candidate heads known locally
    unknownheads = set() # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            # An unknown head with a local obsolescence marker is suspicious:
            # we only debug-log it, we cannot reason about its branch.
            if successorsmarkers.get(h) is not None:
                msg = ('checkheads: remote head unknown locally has'
                       ' local marker: %s\n')
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        # run this check early to skip the evaluation of the whole branch
        if (torev(nh) in futurecommon or ispublic(torev(nh))):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        # NOTE: the result depends on the current contents of localcandidate
        # and newhs, so the pop/add order inside this loop matters.
        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                               nh, localcandidate, newhs)
        branchnodes = [tonode(r) for r in branchrevs]

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (any(ispublic(r) for r in branchrevs)
                or any(torev(n) in futurecommon for n in branchnodes)
                or any(not hasoutmarker(n) for n in branchnodes)):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads.  However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
509
509
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    getmarkers = obsstore.successors.get
    # iterative depth-first walk over the obsolescence graph starting at node
    pending = [node]
    visited = {node}
    while pending:
        current = pending.pop()
        if ispushed(current):
            return True
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in getmarkers(current, ()):
            followups = marker[1] # successors
            if not followups: # this is a prune marker
                followups = marker[5] or () # parents
            fresh = [s for s in followups if s not in visited]
            visited.update(fresh)
            pending.extend(fresh)
    return False
General Comments 0
You need to be logged in to leave comments. Login now