discovery: move cl.hasnode outside of the for-loop...
Pulkit Goyal
r42196:19ccc678 default
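The change itself is small: `cl.hasnode` was being re-bound on every iteration of the per-branch loop in `_headssummary`, even though nothing in the loop changes it, so the commit hoists the binding above the loop and the attribute lookup happens once. A minimal standalone sketch of the pattern follows; `Changelog` and `summarize` here are stand-ins for illustration, not Mercurial's real classes:

# Standalone sketch: hoist a loop-invariant method lookup out of the loop body.

class Changelog(object):
    def __init__(self, nodes):
        self._nodes = set(nodes)

    def hasnode(self, node):
        # stand-in for mercurial's changelog.hasnode
        return node in self._nodes

def summarize(cl, remotemap):
    knownnode = cl.hasnode  # bound once, before the loop (as in this commit)
    headssum = {}
    for branch, heads in remotemap.items():
        known = [h for h in heads if knownnode(h)]
        unsynced = [h for h in heads if not knownnode(h)]
        headssum[branch] = (known, list(known), unsynced)
    return headssum

print(summarize(Changelog(['a', 'b']), {'default': ['a', 'c']}))
# -> {'default': (['a'], ['a'], ['c'])}

Behavior is unchanged; only the redundant per-iteration attribute lookup is removed.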
@@ -1,529 +1,529 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import functools
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 
 from . import (
     bookmarks,
     branchmap,
     error,
     phases,
     scmutil,
     setdiscovery,
     treediscovery,
     util,
 )
 
 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.
 
     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.
     "ancestorsof" if not None, restricts the discovery to a subset defined by
     these nodes. Changesets outside of this set won't be considered (and
     won't appear in "common")
 
     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".
 
     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """
 
     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)
 
     if heads:
         knownnode = repo.changelog.hasnode # no nodemap until it is filtered
         if all(knownnode(h) for h in heads):
             return (heads, False, heads)
 
     res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                        abortwhenunrelated=not force,
                                        ancestorsof=ancestorsof)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))
 
 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.
 
     Members:
 
     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changesets that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.
 
     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''
 
     def __init__(self, repo, commonheads=None, missingheads=None,
                  missingroots=None):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
         if missingheads is None:
             missingheads = cl.heads()
         if missingroots:
             discbases = []
             for n in missingroots:
                 discbases.extend([p for p in cl.parents(n) if p != nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
             csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
             included = set(csets)
             missingheads = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
             commonheads = [nullid]
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = cl
         self._common = None
         self._missing = None
         self.excluded = []
 
     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets
 
     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common
 
     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing
 
 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.
 
     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.
 
     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.
 
     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)
 
     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force,
                                        ancestorsof=onlyheads)
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
     mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = repo.filtered("served").heads()
         og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else: # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]
 
     return og
 
 def _headssummary(pushop):
     """compute a summary of branch and heads status before and after push
 
     return {'branch': ([remoteheads], [newheads],
                        [unsyncedheads], [discardedheads])} mapping
 
     - branch: the branch name,
     - remoteheads: the list of remote heads known locally
                    None if the branch is new,
     - newheads: the new remote heads (known locally) with outgoing pushed,
     - unsyncedheads: the list of remote heads unknown locally,
     - discardedheads: the list of heads made obsolete by the push.
     """
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     cl = repo.changelog
     headssum = {}
     missingctx = set()
     # A. Create set of branches involved in the push.
     branches = set()
     for n in outgoing.missing:
         ctx = repo[n]
         missingctx.add(ctx)
         branches.add(ctx.branch())
 
     with remote.commandexecutor() as e:
         remotemap = e.callcommand('branchmap', {}).result()
 
+    knownnode = cl.hasnode # do not use nodemap until it is filtered
     # A. register remote heads of branches which are in outgoing set
     for branch, heads in remotemap.iteritems():
         # don't add head info about branches which we don't have locally
         if branch not in branches:
             continue
         known = []
         unsynced = []
-        knownnode = cl.hasnode # do not use nodemap until it is filtered
         for h in heads:
             if knownnode(h):
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)
 
     # B. add new branch data
     for branch in branches:
         if branch not in headssum:
             headssum[branch] = (None, [], [])
 
     # C. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.remotebranchcache((branch, heads[1])
                                          for branch, heads in headssum.iteritems()
                                          if heads[0] is not None)
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     for branch, items in headssum.iteritems():
         for l in items:
             if l is not None:
                 l.sort()
         headssum[branch] = items + ([],)
 
     # If there is no obsstore, no post processing is needed.
     if repo.obsstore:
         torev = repo.changelog.rev
         futureheads = set(torev(h) for h in outgoing.missingheads)
         futureheads |= set(torev(h) for h in outgoing.commonheads)
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(headssum.iteritems()):
             remoteheads, newheads, unsyncedheads, placeholder = heads
             result = _postprocessobsolete(pushop, allfuturecommon, newheads)
             headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                 sorted(result[1]))
     return headssum
 
 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""
 
     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
     knownnode = repo.changelog.hasnode # no nodemap until it is filtered
     oldheads = sorted(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new heads are very simple to compute.
     r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = sorted(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
         unsynced = [None]
     else:
         unsynced = []
     return {None: (oldheads, newheads, unsynced, [])}
 
 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     localbookmarks = repo._bookmarks
 
     with remote.commandexecutor() as e:
         remotebookmarks = e.callcommand('listkeys', {
             'namespace': 'bookmarks',
         }).result()
 
     bookmarkedheads = set()
 
     # internal config: bookmarks.pushing
     newbookmarks = [localbookmarks.expandname(b)
                     for b in pushop.ui.configlist('bookmarks', 'pushing')]
 
     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
             if bm in newbookmarks and bm not in remotebookmarks:
                 bookmarkedheads.add(localbookmarks[bm])
 
     return bookmarkedheads
 
 def checkheads(pushop):
     """Check that a push won't add any outgoing head
 
     Raises an Abort error and displays a ui message as needed.
     """
 
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     remoteheads = pushop.remoteheads
     newbranch = pushop.newbranch
     inc = bool(pushop.incoming)
 
     # Check for each named branch if we're creating new remote heads.
     # To be a remote head after push, node must be either:
     # - unknown locally
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return
 
     if remote.capable('branchmap'):
         headssum = _headssummary(pushop)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     pushop.pushbranchmap = headssum
     newbranches = [branch for branch, heads in headssum.iteritems()
                    if heads[0] is None]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch: # new branch requires --new-branch
         branchnames = ', '.join(sorted(newbranches))
         raise error.Abort(_("push creates new remote branches: %s!")
                           % branchnames,
                           hint=_("use 'hg push --new-branch' to create"
                                  " new remote branches"))
 
     # 2. Find heads that we need not warn about
     nowarnheads = _nowarnheads(pushop)
 
     # 3. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     errormsg = None
     for branch, heads in sorted(headssum.iteritems()):
         remoteheads, newheads, unsyncedheads, discardedheads = heads
         # add unsynced data
         if remoteheads is None:
             oldhs = set()
         else:
             oldhs = set(remoteheads)
         oldhs.update(unsyncedheads)
         dhs = None # delta heads, the new heads on branch
         newhs = set(newheads)
         newhs.update(unsyncedheads)
         if unsyncedheads:
             if None in unsyncedheads:
                 # old remote, no heads data
                 heads = None
             else:
                 heads = scmutil.nodesummaries(repo, unsyncedheads)
             if heads is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally\n"))
             elif branch is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally: %s\n") % heads)
             else:
                 repo.ui.status(_("remote has heads on branch '%s' that are "
                                  "not known locally: %s\n") % (branch, heads))
         if remoteheads is None:
             if len(newhs) > 1:
                 dhs = list(newhs)
                 if errormsg is None:
                     errormsg = (_("push creates new branch '%s' "
                                   "with multiple heads") % (branch))
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
         elif len(newhs) > len(oldhs):
             # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - nowarnheads - oldhs)
         if dhs:
             if errormsg is None:
                 if branch not in ('default', None):
                     errormsg = _("push creates new remote head %s "
                                  "on branch '%s'!") % (short(dhs[0]), branch)
                 elif repo[dhs[0]].bookmarks():
                     errormsg = _("push creates new remote head %s "
                                  "with bookmark '%s'!") % (
                                  short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                 else:
                     errormsg = _("push creates new remote head %s!"
                                  ) % short(dhs[0])
                 if unsyncedheads:
                     hint = _("pull and merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
                 else:
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
             if branch is None:
                 repo.ui.note(_("new remote heads:\n"))
             else:
                 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
             for h in dhs:
                 repo.ui.note((" %s\n") % short(h))
     if errormsg:
         raise error.Abort(errormsg, hint=hint)
 
 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
     """post process the list of new heads with obsolescence information
 
     Exists as a sub-function to contain the complexity and allow extensions to
     experiment with smarter logic.
 
     Returns (newheads, discarded_heads) tuple
     """
     # known issue
     #
     # * We "silently" skip processing on all changesets unknown locally
     #
     # * if <nh> is public on the remote, it won't be affected by obsolete
     #     markers and a new one is created
 
     # define various utilities and containers
     repo = pushop.repo
     unfi = repo.unfiltered()
     tonode = unfi.changelog.node
     torev = unfi.changelog.nodemap.get
     public = phases.public
     getphase = unfi._phasecache.phase
     ispublic = (lambda r: getphase(unfi, r) == public)
     ispushed = (lambda n: torev(n) in futurecommon)
     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
     successorsmarkers = unfi.obsstore.successors
     newhs = set() # final set of new heads
     discarded = set() # new head of fully replaced branch
 
     localcandidate = set() # candidate heads known locally
     unknownheads = set() # candidate heads unknown locally
     for h in candidate_newhs:
         if h in unfi:
             localcandidate.add(h)
         else:
             if successorsmarkers.get(h) is not None:
                 msg = ('checkheads: remote head unknown locally has'
                        ' local marker: %s\n')
                 repo.ui.debug(msg % hex(h))
             unknownheads.add(h)
 
     # fast path the simple case
     if len(localcandidate) == 1:
         return unknownheads | set(candidate_newhs), set()
 
     # actually process branch replacement
     while localcandidate:
         nh = localcandidate.pop()
         # run this check early to skip the evaluation of the whole branch
         if (torev(nh) in futurecommon or ispublic(torev(nh))):
             newhs.add(nh)
             continue
 
         # Get all revs/nodes on the branch exclusive to this head
         # (already filtered heads are "ignored")
         branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                                nh, localcandidate, newhs)
         branchnodes = [tonode(r) for r in branchrevs]
 
         # The branch won't be hidden on the remote if
         # * any part of it is public,
         # * any part of it is considered part of the result by previous logic,
         # * if we have no markers to push to obsolete it.
         if (any(ispublic(r) for r in branchrevs)
                 or any(torev(n) in futurecommon for n in branchnodes)
                 or any(not hasoutmarker(n) for n in branchnodes)):
             newhs.add(nh)
         else:
             # note: there is a corner case if there is a merge in the branch.
             # we might end up with -more- heads. However, these heads are not
             # "added" by the push, but more by the "removal" on the remote, so
             # I think it is okay to ignore them.
             discarded.add(nh)
     newhs |= unknownheads
     return newhs, discarded
 
 def pushingmarkerfor(obsstore, ispushed, node):
     """true if some markers are to be pushed for node
 
     We cannot just look into the pushed obsmarkers from the pushop because
     discovery might have filtered relevant markers. In addition, listing all
     markers relevant to all changesets in the pushed set would be too expensive
     (O(len(repo)))
 
     (note: there are caching opportunities in this function, but it would
     require a two-dimensional stack.)
     """
     successorsmarkers = obsstore.successors
     stack = [node]
     seen = set(stack)
     while stack:
         current = stack.pop()
         if ispushed(current):
             return True
         markers = successorsmarkers.get(current, ())
         # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
         for m in markers:
             nexts = m[1] # successors
             if not nexts: # this is a prune marker
                 nexts = m[5] or () # parents
             for n in nexts:
                 if n not in seen:
                     seen.add(n)
                     stack.append(n)
     return False
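
For reference when reading `_headssummary` and `checkheads` above, the summary mapping has the shape described in the `_headssummary` docstring. A hypothetical example, with short strings standing in for the binary node ids the real code uses:

# Hypothetical headssum value (illustrative stand-in node ids, not real nodes):
headssum = {
    'default': (['n1'],        # remoteheads: remote heads known locally
                ['n1', 'n2'],  # newheads: remote heads once outgoing is pushed
                [],            # unsyncedheads: remote heads unknown locally
                []),           # discardedheads: heads obsoleted by the push
    'feature': (None, ['n3'], [], []),  # None remoteheads marks a new branch
}

# checkheads detects branches new to the remote via heads[0] is None:
newbranches = [branch for branch, heads in headssum.items()
               if heads[0] is None]
assert newbranches == ['feature']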