checkheads: extract obsolete post processing in its own function...
Pierre-Yves David
r31586:df82f375 default
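
In checkheads(), the block that filtered candidate new heads through obsolescence markers is moved, essentially verbatim, into a new module-level helper _postprocessobsolete(pushop, futurecommon, candidate_newhs); the only structural nuance is that discardedheads is now initialized inside the "if not repo.obsstore:" branch rather than before it. A hedged sketch of how an extension might hook the new function follows the diff.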
@@ -1,436 +1,450 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from .i18n import _
 from .node import (
     nullid,
     short,
 )

 from . import (
     bookmarks,
     branchmap,
     error,
     obsolete,
     phases,
     setdiscovery,
     treediscovery,
     util,
 )

 def findcommonincoming(repo, remote, heads=None, force=False):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.

     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.

     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".

     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """

     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)

     if heads:
         allknown = True
         knownnode = repo.changelog.hasnode # no nodemap until it is filtered
         for h in heads:
             if not knownnode(h):
                 allknown = False
                 break
         if allknown:
             return (heads, False, heads)

     res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                        abortwhenunrelated=not force)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))

 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.

     Members:

     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changeset that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.

     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''

     def __init__(self, repo, commonheads=None, missingheads=None,
                  missingroots=None):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
         if missingheads is None:
             missingheads = cl.heads()
         if missingroots:
             discbases = []
             for n in missingroots:
                 discbases.extend([p for p in cl.parents(n) if p != nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
             csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
             included = set(csets)
             missingheads = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
             commonheads = [nullid]
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = cl
         self._common = None
         self._missing = None
         self.excluded = []

     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets

     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common

     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing

 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.

     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.

     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.

     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)

     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force)
     og.commonheads, _any, _hds = commoninc

     # compute outgoing
     mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = repo.filtered("served").heads()
         og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else: # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]

     return og

 def _headssummary(repo, remote, outgoing):
     """compute a summary of branch and heads status before and after push

     return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

     - branch: the branch name
     - remoteheads: the list of remote heads known locally
                    None if the branch is new
     - newheads: the new remote heads (known locally) with outgoing pushed
     - unsyncedheads: the list of remote heads unknown locally.
     """
     cl = repo.changelog
     headssum = {}
     # A. Create set of branches involved in the push.
     branches = set(repo[n].branch() for n in outgoing.missing)
     remotemap = remote.branchmap()
     newbranches = branches - set(remotemap)
     branches.difference_update(newbranches)

     # A. register remote heads
     remotebranches = set()
     for branch, heads in remote.branchmap().iteritems():
         remotebranches.add(branch)
         known = []
         unsynced = []
         knownnode = cl.hasnode # do not use nodemap until it is filtered
         for h in heads:
             if knownnode(h):
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)
     # B. add new branch data
     missingctx = list(repo[n] for n in outgoing.missing)
     touchedbranches = set()
     for ctx in missingctx:
         branch = ctx.branch()
         touchedbranches.add(branch)
         if branch not in headssum:
             headssum[branch] = (None, [], [])

     # C drop data about untouched branches:
     for branch in remotebranches - touchedbranches:
         del headssum[branch]

     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.branchcache((branch, heads[1])
                                    for branch, heads in headssum.iteritems()
                                    if heads[0] is not None)
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     return headssum

 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""

     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
     knownnode = repo.changelog.hasnode # no nodemap until it is filtered
     oldheads = set(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new head are very simple to compute.
     r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = list(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
         unsynced = set([None])
     else:
         unsynced = set()
     return {None: (oldheads, newheads, unsynced)}

 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     localbookmarks = repo._bookmarks
     remotebookmarks = remote.listkeys('bookmarks')
     bookmarkedheads = set()

     # internal config: bookmarks.pushing
     newbookmarks = [localbookmarks.expandname(b)
                     for b in pushop.ui.configlist('bookmarks', 'pushing')]

     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = repo[bm], repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
             if bm in newbookmarks and bm not in remotebookmarks:
                 bookmarkedheads.add(repo[bm].node())

     return bookmarkedheads

 def checkheads(pushop):
     """Check that a push won't add any outgoing head

     raise Abort error and display ui message as needed.
     """

     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     remoteheads = pushop.remoteheads
     newbranch = pushop.newbranch
     inc = bool(pushop.incoming)

     # Check for each named branch if we're creating new remote heads.
     # To be a remote head after push, node must be either:
     # - unknown locally
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return

     if remote.capable('branchmap'):
         headssum = _headssummary(repo, remote, outgoing)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     newbranches = [branch for branch, heads in headssum.iteritems()
                    if heads[0] is None]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch: # new branch requires --new-branch
         branchnames = ', '.join(sorted(newbranches))
         raise error.Abort(_("push creates new remote branches: %s!")
                           % branchnames,
                           hint=_("use 'hg push --new-branch' to create"
                                  " new remote branches"))

     # 2. Find heads that we need not warn about
     nowarnheads = _nowarnheads(pushop)

     # 3. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     errormsg = None
     # If there is no obsstore, allfuturecommon won't be used, so no
     # need to compute it.
     if repo.obsstore:
         allmissing = set(outgoing.missing)
         cctx = repo.set('%ld', outgoing.common)
         allfuturecommon = set(c.node() for c in cctx)
         allfuturecommon.update(allmissing)
     for branch, heads in sorted(headssum.iteritems()):
         remoteheads, newheads, unsyncedheads = heads
         candidate_newhs = set(newheads)
         # add unsynced data
         if remoteheads is None:
             oldhs = set()
         else:
             oldhs = set(remoteheads)
         oldhs.update(unsyncedheads)
         candidate_newhs.update(unsyncedheads)
         dhs = None # delta heads, the new heads on branch
-        discardedheads = set()
         if not repo.obsstore:
+            discardedheads = set()
             newhs = candidate_newhs
         else:
-            # remove future heads which are actually obsoleted by another
-            # pushed element:
-            #
-            # XXX as above, There are several cases this code does not handle
-            # XXX properly
-            #
-            # (1) if <nh> is public, it won't be affected by obsolete marker
-            #     and a new is created
-            #
-            # (2) if the new heads have ancestors which are not obsolete and
-            #     not ancestors of any other heads we will have a new head too.
-            #
-            # These two cases will be easy to handle for known changeset but
-            # much more tricky for unsynced changes.
-            #
-            # In addition, this code is confused by prune as it only looks for
-            # successors of the heads (none if pruned) leading to issue4354
-            newhs = set()
-            for nh in candidate_newhs:
-                if nh in repo and repo[nh].phase() <= phases.public:
-                    newhs.add(nh)
-                else:
-                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
-                        if suc != nh and suc in allfuturecommon:
-                            discardedheads.add(nh)
-                            break
-                    else:
-                        newhs.add(nh)
+            newhs, discardedheads = _postprocessobsolete(pushop,
+                                                         allfuturecommon,
+                                                         candidate_newhs)
         unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
         if unsynced:
             if None in unsynced:
                 # old remote, no heads data
                 heads = None
             elif len(unsynced) <= 4 or repo.ui.verbose:
                 heads = ' '.join(short(h) for h in unsynced)
             else:
                 heads = (' '.join(short(h) for h in unsynced[:4]) +
                          ' ' + _("and %s others") % (len(unsynced) - 4))
             if heads is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally\n"))
             elif branch is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally: %s\n") % heads)
             else:
                 repo.ui.status(_("remote has heads on branch '%s' that are "
                                  "not known locally: %s\n") % (branch, heads))
         if remoteheads is None:
             if len(newhs) > 1:
                 dhs = list(newhs)
                 if errormsg is None:
                     errormsg = (_("push creates new branch '%s' "
                                   "with multiple heads") % (branch))
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
         elif len(newhs) > len(oldhs):
             # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - nowarnheads - oldhs)
         if dhs:
             if errormsg is None:
                 if branch not in ('default', None):
                     errormsg = _("push creates new remote head %s "
                                  "on branch '%s'!") % (short(dhs[0]), branch)
                 elif repo[dhs[0]].bookmarks():
                     errormsg = _("push creates new remote head %s "
                                  "with bookmark '%s'!") % (
                                  short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                 else:
                     errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                 if unsyncedheads:
                     hint = _("pull and merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
                 else:
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
             if branch is None:
                 repo.ui.note(_("new remote heads:\n"))
             else:
                 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
             for h in dhs:
                 repo.ui.note((" %s\n") % short(h))
     if errormsg:
         raise error.Abort(errormsg, hint=hint)
+
+def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
+    """post process the list of new heads with obsolescence information
+
+    Exists as a subfunction to contain the complexity and allow extensions to
+    experiment with smarter logic.
+    Returns (newheads, discarded_heads) tuple
+    """
+    # remove future heads which are actually obsoleted by another
+    # pushed element:
+    #
+    # XXX as above, There are several cases this code does not handle
+    # XXX properly
+    #
+    # (1) if <nh> is public, it won't be affected by obsolete marker
+    #     and a new is created
+    #
+    # (2) if the new heads have ancestors which are not obsolete and
+    #     not ancestors of any other heads we will have a new head too.
+    #
+    # These two cases will be easy to handle for known changeset but
+    # much more tricky for unsynced changes.
+    #
+    # In addition, this code is confused by prune as it only looks for
+    # successors of the heads (none if pruned) leading to issue4354
+    repo = pushop.repo
+    newhs = set()
+    discarded = set()
+    for nh in candidate_newhs:
+        if nh in repo and repo[nh].phase() <= phases.public:
+            newhs.add(nh)
+        else:
+            for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
+                if suc != nh and suc in futurecommon:
+                    discarded.add(nh)
+                    break
+            else:
+                newhs.add(nh)
+    return newhs, discarded
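
The new helper's docstring notes it exists partly so extensions can "experiment with smarter logic". A minimal sketch of what such an experiment could look like, assuming the standard mercurial.extensions.wrapfunction() mechanism (the extension module name and the wrapper body are hypothetical, not part of this changeset):

# smartheads.py - hypothetical extension wrapping the new hook point
from mercurial import discovery, extensions

def _postprocess(orig, pushop, futurecommon, candidate_newhs):
    # call the stock implementation first; it returns the
    # (newheads, discarded_heads) pair documented above
    newhs, discarded = orig(pushop, futurecommon, candidate_newhs)
    # experiments would go here, e.g. trying to also recognize heads
    # pruned without successors (the issue4354 case noted in the comments)
    return newhs, discarded

def extsetup(ui):
    # replace discovery._postprocessobsolete with the wrapper; the original
    # function is passed to _postprocess as its first argument
    extensions.wrapfunction(discovery, '_postprocessobsolete', _postprocess)

Because the helper returns the full (newheads, discarded_heads) pair, a wrapper can post-filter either set without re-implementing any of checkheads() itself.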