##// END OF EJS Templates
checkheads: perform obsolescence post processing directly in _headssummary...
marmoute -
r32707:32c8f98a default
parent child Browse files
Show More
@@ -1,522 +1,522
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 setdiscovery,
24 setdiscovery,
25 treediscovery,
25 treediscovery,
26 util,
26 util,
27 )
27 )
28
28
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    # Old servers without getbundle fall back to tree-walking discovery.
    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        knownnode = repo.changelog.hasnode # no nodemap until it is filtered
        # If every requested head is already known locally, there is
        # nothing to discover: the requested heads are the answer.
        if all(knownnode(h) for h in heads):
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui, repo, remote, abortwhenunrelated=not force)
    return (list(common), anyinc, heads or list(srvheads))
65
65
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changeset that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, repo, commonheads=None, missingheads=None,
                 missingroots=None):
        # commonheads and missingroots are mutually exclusive ways of
        # describing the boundary; at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            # discovery bases: the non-null parents of the missing roots
            discbases = [p for n in missingroots
                         for p in cl.parents(n)
                         if p != nullid]
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            included = set(csets)
            missingheads = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = cl
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # fill the lazily-computed common/missing node sets in one pass
        self._common, self._missing = self._revlog.findcommonmissing(
            self.commonheads, self.missingheads)

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
124
124
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        # nothing secret or obsolete: everything local may be sent
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        og._common, allmissing = repo.changelog.findcommonmissing(
            og.commonheads, onlyheads)
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            # secret and extinct changesets are never exchanged
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og
184
184
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # A. register remote heads
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        knownnode = cl.hasnode # do not use nodemap until it is filtered
        # split the remote heads into locally-known and unknown ones
        known = [h for h in heads if knownnode(h)]
        unsynced = [h for h in heads if not knownnode(h)]
        headssum[branch] = (known, list(known), unsynced)
    # B. add new branch data
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C drop data about untouched branches:
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.branchcache((branch, heads[1])
                                   for branch, heads in headssum.iteritems()
                                   if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    # keep all three per-branch lists sorted for deterministic output
    for branch, items in headssum.iteritems():
        for l in items:
            if l is not None:
                l.sort()
    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        allmissing = set(outgoing.missing)
        cctx = repo.set('%ld', outgoing.common)
        # everything that will be common after the push
        allfuturecommon = set(c.node() for c in cctx)
        allfuturecommon.update(allmissing)
        for branch, heads in sorted(headssum.iteritems()):
            remoteheads, newheads, unsyncedheads = heads
            # drop heads that obsolescence markers will hide on the remote
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            newheads = sorted(result[0])
            headssum[branch] = (remoteheads, newheads, unsyncedheads)
    return headssum
246
257
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new head are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    unsynced = [None] if inc else []
    return {None: (oldheads, newheads, unsynced)}
268
279
def _nowarnheads(pushop):
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            # the bookmark exists on both sides; exempt the local target if
            # the remote position is a valid predecessor of it
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        elif bm in newbookmarks and bm not in remotebookmarks:
            # a bookmark being pushed for the first time also exempts its head
            bookmarkedheads.add(repo[bm].node())

    return bookmarkedheads
292
303
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise error.Abort(_("push creates new remote branches: %s!")
                          % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads = heads
        # add unsynced data
        oldhs = set() if remoteheads is None else set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            # warn the user about remote heads we do not know locally
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            elif len(unsyncedheads) <= 4 or repo.ui.verbose:
                heads = ' '.join(short(h) for h in unsyncedheads)
            else:
                heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
                         ' ' + _("and %s others") % (len(unsyncedheads) - 4))
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            # brand-new branch: only complain if it would have several heads
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
417
417
418 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
418 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
419 """post process the list of new heads with obsolescence information
419 """post process the list of new heads with obsolescence information
420
420
421 Exists as a sub-function to contain the complexity and allow extensions to
421 Exists as a sub-function to contain the complexity and allow extensions to
422 experiment with smarter logic.
422 experiment with smarter logic.
423
423
424 Returns (newheads, discarded_heads) tuple
424 Returns (newheads, discarded_heads) tuple
425 """
425 """
426 # known issue
426 # known issue
427 #
427 #
428 # * We "silently" skip processing on all changeset unknown locally
428 # * We "silently" skip processing on all changeset unknown locally
429 #
429 #
430 # * if <nh> is public on the remote, it won't be affected by obsolete
430 # * if <nh> is public on the remote, it won't be affected by obsolete
431 # marker and a new is created
431 # marker and a new is created
432
432
433 # define various utilities and containers
433 # define various utilities and containers
434 repo = pushop.repo
434 repo = pushop.repo
435 unfi = repo.unfiltered()
435 unfi = repo.unfiltered()
436 tonode = unfi.changelog.node
436 tonode = unfi.changelog.node
437 torev = unfi.changelog.rev
437 torev = unfi.changelog.rev
438 public = phases.public
438 public = phases.public
439 getphase = unfi._phasecache.phase
439 getphase = unfi._phasecache.phase
440 ispublic = (lambda r: getphase(unfi, r) == public)
440 ispublic = (lambda r: getphase(unfi, r) == public)
441 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
441 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
442 futurecommon)
442 futurecommon)
443 successorsmarkers = unfi.obsstore.successors
443 successorsmarkers = unfi.obsstore.successors
444 newhs = set() # final set of new heads
444 newhs = set() # final set of new heads
445 discarded = set() # new head of fully replaced branch
445 discarded = set() # new head of fully replaced branch
446
446
447 localcandidate = set() # candidate heads known locally
447 localcandidate = set() # candidate heads known locally
448 unknownheads = set() # candidate heads unknown locally
448 unknownheads = set() # candidate heads unknown locally
449 for h in candidate_newhs:
449 for h in candidate_newhs:
450 if h in unfi:
450 if h in unfi:
451 localcandidate.add(h)
451 localcandidate.add(h)
452 else:
452 else:
453 if successorsmarkers.get(h) is not None:
453 if successorsmarkers.get(h) is not None:
454 msg = ('checkheads: remote head unknown locally has'
454 msg = ('checkheads: remote head unknown locally has'
455 ' local marker: %s\n')
455 ' local marker: %s\n')
456 repo.ui.debug(msg % hex(h))
456 repo.ui.debug(msg % hex(h))
457 unknownheads.add(h)
457 unknownheads.add(h)
458
458
459 # fast path the simple case
459 # fast path the simple case
460 if len(localcandidate) == 1:
460 if len(localcandidate) == 1:
461 return unknownheads | set(candidate_newhs), set()
461 return unknownheads | set(candidate_newhs), set()
462
462
463 # actually process branch replacement
463 # actually process branch replacement
464 while localcandidate:
464 while localcandidate:
465 nh = localcandidate.pop()
465 nh = localcandidate.pop()
466 # run this check early to skip the evaluation of the whole branch
466 # run this check early to skip the evaluation of the whole branch
467 if (nh in futurecommon or ispublic(torev(nh))):
467 if (nh in futurecommon or ispublic(torev(nh))):
468 newhs.add(nh)
468 newhs.add(nh)
469 continue
469 continue
470
470
471 # Get all revs/nodes on the branch exclusive to this head
471 # Get all revs/nodes on the branch exclusive to this head
472 # (already filtered heads are "ignored"))
472 # (already filtered heads are "ignored"))
473 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
473 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
474 nh, localcandidate, newhs)
474 nh, localcandidate, newhs)
475 branchnodes = [tonode(r) for r in branchrevs]
475 branchnodes = [tonode(r) for r in branchrevs]
476
476
477 # The branch won't be hidden on the remote if
477 # The branch won't be hidden on the remote if
478 # * any part of it is public,
478 # * any part of it is public,
479 # * any part of it is considered part of the result by previous logic,
479 # * any part of it is considered part of the result by previous logic,
480 # * if we have no markers to push to obsolete it.
480 # * if we have no markers to push to obsolete it.
481 if (any(ispublic(r) for r in branchrevs)
481 if (any(ispublic(r) for r in branchrevs)
482 or any(n in futurecommon for n in branchnodes)
482 or any(n in futurecommon for n in branchnodes)
483 or any(not hasoutmarker(n) for n in branchnodes)):
483 or any(not hasoutmarker(n) for n in branchnodes)):
484 newhs.add(nh)
484 newhs.add(nh)
485 else:
485 else:
486 # note: there is a corner case if there is a merge in the branch.
486 # note: there is a corner case if there is a merge in the branch.
487 # we might end up with -more- heads. However, these heads are not
487 # we might end up with -more- heads. However, these heads are not
488 # "added" by the push, but more by the "removal" on the remote so I
488 # "added" by the push, but more by the "removal" on the remote so I
489 # think is a okay to ignore them,
489 # think is a okay to ignore them,
490 discarded.add(nh)
490 discarded.add(nh)
491 newhs |= unknownheads
491 newhs |= unknownheads
492 return newhs, discarded
492 return newhs, discarded
493
493
def pushingmarkerfor(obsstore, pushset, node):
    """true if some markers are to be pushed for node

    Walks the obsolescence "successors" graph starting at <node> and returns
    True as soon as it reaches a changeset in <pushset> (for a prune marker,
    which has no successors, the walk continues through the marker's recorded
    parents instead).

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    successorsmarkers = obsstore.successors
    stack = [node]
    seen = set(stack)
    while stack:
        current = stack.pop()
        if current in pushset:
            return True
        markers = successorsmarkers.get(current, ())
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for m in markers:
            nexts = m[1] # successors
            if not nexts: # this is a prune marker
                # a prune marker records no successors; follow its parents so
                # the walk can still reach descendants-by-pruning
                nexts = m[5] or () # parents
            for n in nexts:
                if n not in seen:
                    seen.add(n)
                    stack.append(n)
    return False
522 return False
General Comments 0
You need to be logged in to leave comments. Login now