discovery: prevent recomputing info about server and outgoing changesets...
Pulkit Goyal
r42193:98908e36 default
@@ -1,533 +1,534 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import functools

 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )

 from . import (
     bookmarks,
     branchmap,
     error,
     phases,
     scmutil,
     setdiscovery,
     treediscovery,
     util,
 )

 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.

     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.
     "ancestorsof" if not None, restricts the discovery to a subset defined by
     these nodes. Changesets outside of this set won't be considered (and
     won't appear in "common").

     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".

     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """

     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)

     if heads:
         knownnode = repo.changelog.hasnode # no nodemap until it is filtered
         if all(knownnode(h) for h in heads):
             return (heads, False, heads)

     res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                        abortwhenunrelated=not force,
                                        ancestorsof=ancestorsof)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))

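For illustration, a minimal sketch of consuming the returned tuple; `repo` (a localrepository) and `remote` (a peer) are assumed to already exist and are not part of this change:

    # Sketch only: run discovery against a peer and inspect the result.
    common, anyinc, srvheads = findcommonincoming(repo, remote)
    if not anyinc:
        repo.ui.status('nothing to pull\n')   # every remote node is known
    else:
        # 'common' bounds the request; 'srvheads' are the heads to ask for
        repo.ui.status('%d remote head(s) to request\n' % len(srvheads))
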
 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.

     Members:

     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changesets that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.

     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''

     def __init__(self, repo, commonheads=None, missingheads=None,
                  missingroots=None):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
         if missingheads is None:
             missingheads = cl.heads()
         if missingroots:
             discbases = []
             for n in missingroots:
                 discbases.extend([p for p in cl.parents(n) if p != nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
             csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
             included = set(csets)
             missingheads = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
             commonheads = [nullid]
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = cl
         self._common = None
         self._missing = None
         self.excluded = []

     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets

     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common

     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing

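As a hedged sketch of the two construction paths the assertion in `__init__` allows (at least one of `commonheads` and `missingroots` must stay unset; `somenode` is hypothetical):

    # Relative to an empty remote, everything is missing:
    og = outgoing(repo)                  # commonheads defaults to [nullid]
    heads = og.missingheads              # local changelog heads
    nodes = og.missing                   # computed lazily on first access

    # Or seed the computation from the roots of the missing set:
    og = outgoing(repo, missingroots=[somenode])
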
 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.

     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.

     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.

     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)

     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force,
                                        ancestorsof=onlyheads)
     og.commonheads, _any, _hds = commoninc

     # compute outgoing
     mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = repo.filtered("served").heads()
         og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else: # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]

     return og

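The `commoninc` parameter exists precisely so callers can run discovery once and share the result, which is the theme of this changeset; a short sketch (names as above):

    # Compute the common set once, then reuse it for outgoing:
    commoninc = findcommonincoming(repo, other, force=False)
    og = findcommonoutgoing(repo, other, commoninc=commoninc)
    repo.ui.status('%d changesets would be pushed\n' % len(og.missing))
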
 def _headssummary(pushop):
     """compute a summary of branch and heads status before and after push

     return {'branch': ([remoteheads], [newheads],
                        [unsyncedheads], [discardedheads])} mapping

     - branch: the branch name,
     - remoteheads: the list of remote heads known locally
                    None if the branch is new,
     - newheads: the new remote heads (known locally) with outgoing pushed,
     - unsyncedheads: the list of remote heads unknown locally,
     - discardedheads: the list of heads made obsolete by the push.
     """
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     cl = repo.changelog
     headssum = {}
+    missingctx = set()
     # A. Create set of branches involved in the push.
-    branches = set(repo[n].branch() for n in outgoing.missing)
+    branches = set()
+    for n in outgoing.missing:
+        ctx = repo[n]
+        missingctx.add(ctx)
+        branches.add(ctx.branch())
+    nbranches = branches.copy()

     with remote.commandexecutor() as e:
         remotemap = e.callcommand('branchmap', {}).result()

-    newbranches = branches - set(remotemap)
+    remotebranches = set(remotemap)
+    newbranches = branches - remotebranches
     branches.difference_update(newbranches)

     # A. register remote heads
-    remotebranches = set()
     for branch, heads in remotemap.iteritems():
-        remotebranches.add(branch)
         known = []
         unsynced = []
         knownnode = cl.hasnode # do not use nodemap until it is filtered
         for h in heads:
             if knownnode(h):
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)
     # B. add new branch data
-    missingctx = list(repo[n] for n in outgoing.missing)
-    touchedbranches = set()
-    for ctx in missingctx:
-        branch = ctx.branch()
-        touchedbranches.add(branch)
+    for branch in nbranches:
         if branch not in headssum:
             headssum[branch] = (None, [], [])

     # C drop data about untouched branches:
-    for branch in remotebranches - touchedbranches:
+    for branch in remotebranches - nbranches:
         del headssum[branch]

     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.remotebranchcache((branch, heads[1])
                                          for branch, heads in headssum.iteritems()
                                          if heads[0] is not None)
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     for branch, items in headssum.iteritems():
         for l in items:
             if l is not None:
                 l.sort()
         headssum[branch] = items + ([],)

     # If there is no obsstore, no post processing is needed.
     if repo.obsstore:
         torev = repo.changelog.rev
         futureheads = set(torev(h) for h in outgoing.missingheads)
         futureheads |= set(torev(h) for h in outgoing.commonheads)
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(headssum.iteritems()):
             remoteheads, newheads, unsyncedheads, placeholder = heads
             result = _postprocessobsolete(pushop, allfuturecommon, newheads)
             headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                 sorted(result[1]))
     return headssum

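To make the return shape concrete, an illustrative (purely hypothetical) value:

    # headssum for a push that adds a head on 'default' and creates
    # a branch 'feature' unknown to the remote:
    # {
    #     'default': ([oldhead],            # remote heads known locally
    #                 [oldhead, newhead],   # heads once the push lands
    #                 [],                   # remote heads unknown locally
    #                 []),                  # heads obsoleted by the push
    #     'feature': (None, [newhead2], [], []),   # new remote branch
    # }
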
 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""

     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
     knownnode = repo.changelog.hasnode # no nodemap until it is filtered
     oldheads = sorted(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new heads are very simple to compute.
     r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = sorted(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
         unsynced = [None]
     else:
         unsynced = []
     return {None: (oldheads, newheads, unsynced, [])}

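The comment's reasoning can be restated on a toy history (hypothetical nodes):

    # Remote's only head is b in the chain a-b. We push c (child of b)
    # and d (child of a):
    #   heads({b} + {c, d}) == {c, d}
    # so the pushed set would raise the topological head count from 1 to 2.
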
 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     localbookmarks = repo._bookmarks

     with remote.commandexecutor() as e:
         remotebookmarks = e.callcommand('listkeys', {
             'namespace': 'bookmarks',
         }).result()

     bookmarkedheads = set()

     # internal config: bookmarks.pushing
     newbookmarks = [localbookmarks.expandname(b)
                     for b in pushop.ui.configlist('bookmarks', 'pushing')]

     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
             if bm in newbookmarks and bm not in remotebookmarks:
                 bookmarkedheads.add(localbookmarks[bm])

     return bookmarkedheads

 def checkheads(pushop):
     """Check that a push won't add any outgoing head

     Raises an Abort error and displays ui messages as needed.
     """

     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     remoteheads = pushop.remoteheads
     newbranch = pushop.newbranch
     inc = bool(pushop.incoming)

     # Check for each named branch if we're creating new remote heads.
     # To be a remote head after push, node must be either:
     # - unknown locally
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return

     if remote.capable('branchmap'):
         headssum = _headssummary(pushop)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     pushop.pushbranchmap = headssum
     newbranches = [branch for branch, heads in headssum.iteritems()
                    if heads[0] is None]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch: # new branch requires --new-branch
         branchnames = ', '.join(sorted(newbranches))
         raise error.Abort(_("push creates new remote branches: %s!")
                           % branchnames,
                           hint=_("use 'hg push --new-branch' to create"
                                  " new remote branches"))

     # 2. Find heads that we need not warn about
     nowarnheads = _nowarnheads(pushop)

     # 3. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     errormsg = None
     for branch, heads in sorted(headssum.iteritems()):
         remoteheads, newheads, unsyncedheads, discardedheads = heads
         # add unsynced data
         if remoteheads is None:
             oldhs = set()
         else:
             oldhs = set(remoteheads)
         oldhs.update(unsyncedheads)
         dhs = None # delta heads, the new heads on branch
         newhs = set(newheads)
         newhs.update(unsyncedheads)
         if unsyncedheads:
             if None in unsyncedheads:
                 # old remote, no heads data
                 heads = None
             else:
                 heads = scmutil.nodesummaries(repo, unsyncedheads)
             if heads is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally\n"))
             elif branch is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally: %s\n") % heads)
             else:
                 repo.ui.status(_("remote has heads on branch '%s' that are "
                                  "not known locally: %s\n") % (branch, heads))
         if remoteheads is None:
             if len(newhs) > 1:
                 dhs = list(newhs)
                 if errormsg is None:
                     errormsg = (_("push creates new branch '%s' "
                                   "with multiple heads") % (branch))
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
         elif len(newhs) > len(oldhs):
             # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - nowarnheads - oldhs)
         if dhs:
             if errormsg is None:
                 if branch not in ('default', None):
                     errormsg = _("push creates new remote head %s "
                                  "on branch '%s'!") % (short(dhs[0]), branch)
                 elif repo[dhs[0]].bookmarks():
                     errormsg = _("push creates new remote head %s "
                                  "with bookmark '%s'!") % (
                                  short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                 else:
                     errormsg = _("push creates new remote head %s!"
                                  ) % short(dhs[0])
                 if unsyncedheads:
                     hint = _("pull and merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
                 else:
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
             if branch is None:
                 repo.ui.note(_("new remote heads:\n"))
             else:
                 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
             for h in dhs:
                 repo.ui.note((" %s\n") % short(h))
     if errormsg:
         raise error.Abort(errormsg, hint=hint)

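A worked example of the per-branch arithmetic above, with hypothetical nodes `a` and `b`:

    # The remote branch has head a; the push would add head b.
    oldhs = {a}                                # remote + unsynced heads
    newhs = {a, b}                             # heads once outgoing lands
    dhs = sorted(newhs - nowarnheads - oldhs)  # -> [b]
    # len(newhs) > len(oldhs) and b carries no bookmark, so checkheads
    # aborts with "push creates new remote head ..." plus the merge hint.
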
 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
     """post process the list of new heads with obsolescence information

     Exists as a sub-function to contain the complexity and allow extensions to
     experiment with smarter logic.

     Returns (newheads, discarded_heads) tuple
     """
     # known issues
     #
     # * We "silently" skip processing on all changesets unknown locally
     #
     # * if <nh> is public on the remote, it won't be affected by obsolete
     #   markers and a new head is created

     # define various utilities and containers
     repo = pushop.repo
     unfi = repo.unfiltered()
     tonode = unfi.changelog.node
     torev = unfi.changelog.nodemap.get
     public = phases.public
     getphase = unfi._phasecache.phase
     ispublic = (lambda r: getphase(unfi, r) == public)
     ispushed = (lambda n: torev(n) in futurecommon)
     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
     successorsmarkers = unfi.obsstore.successors
     newhs = set() # final set of new heads
     discarded = set() # new head of fully replaced branch

     localcandidate = set() # candidate heads known locally
     unknownheads = set() # candidate heads unknown locally
     for h in candidate_newhs:
         if h in unfi:
             localcandidate.add(h)
         else:
             if successorsmarkers.get(h) is not None:
                 msg = ('checkheads: remote head unknown locally has'
                        ' local marker: %s\n')
                 repo.ui.debug(msg % hex(h))
             unknownheads.add(h)

     # fast path the simple case
     if len(localcandidate) == 1:
         return unknownheads | set(candidate_newhs), set()

     # actually process branch replacement
     while localcandidate:
         nh = localcandidate.pop()
         # run this check early to skip the evaluation of the whole branch
         if (torev(nh) in futurecommon or ispublic(torev(nh))):
             newhs.add(nh)
             continue

         # Get all revs/nodes on the branch exclusive to this head
         # (already filtered heads are "ignored")
         branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                                nh, localcandidate, newhs)
         branchnodes = [tonode(r) for r in branchrevs]

         # The branch won't be hidden on the remote if
         # * any part of it is public,
         # * any part of it is considered part of the result by previous logic,
         # * if we have no markers to push to obsolete it.
         if (any(ispublic(r) for r in branchrevs)
             or any(torev(n) in futurecommon for n in branchnodes)
             or any(not hasoutmarker(n) for n in branchnodes)):
             newhs.add(nh)
         else:
             # note: there is a corner case if there is a merge in the branch.
             # we might end up with -more- heads. However, these heads are not
             # "added" by the push, but more by the "removal" on the remote so
             # I think it is okay to ignore them,
             discarded.add(nh)
     newhs |= unknownheads
     return newhs, discarded

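A note on the `only()` revset used in the loop above: it selects the history exclusive to the candidate head. An equivalent spelling, as a sketch:

    # only(X, Y) == ancestors(X) - ancestors(Y); here, the part of the
    # branch that only nh would publish:
    branchrevs = unfi.revs('ancestors(%n) - ancestors(%ln + %ln)',
                           nh, localcandidate, newhs)
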
 def pushingmarkerfor(obsstore, ispushed, node):
     """true if some markers are to be pushed for node

     We cannot just look into the pushed obsmarkers from the pushop because
     discovery might have filtered relevant markers. In addition listing all
     markers relevant to all changesets in the pushed set would be too
     expensive (O(len(repo)))

     (note: there are caching opportunities in this function, but it would
     require a two-dimensional stack.)
     """
     successorsmarkers = obsstore.successors
     stack = [node]
     seen = set(stack)
     while stack:
         current = stack.pop()
         if ispushed(current):
             return True
         markers = successorsmarkers.get(current, ())
         # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
         for m in markers:
             nexts = m[1] # successors
             if not nexts: # this is a prune marker
                 nexts = m[5] or () # parents
             for n in nexts:
                 if n not in seen:
                     seen.add(n)
                     stack.append(n)
     return False