##// END OF EJS Templates
checkheads: simplify the code around obsolescence post-processing...
marmoute -
r32675:fc9296c1 default
parent child Browse files
Show More
@@ -1,521 +1,518 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 setdiscovery,
24 setdiscovery,
25 treediscovery,
25 treediscovery,
26 util,
26 util,
27 )
27 )
28
28
29 def findcommonincoming(repo, remote, heads=None, force=False):
29 def findcommonincoming(repo, remote, heads=None, force=False):
30 """Return a tuple (common, anyincoming, heads) used to identify the common
30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 subset of nodes between repo and remote.
31 subset of nodes between repo and remote.
32
32
33 "common" is a list of (at least) the heads of the common subset.
33 "common" is a list of (at least) the heads of the common subset.
34 "anyincoming" is testable as a boolean indicating if any nodes are missing
34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 locally. If remote does not support getbundle, this actually is a list of
35 locally. If remote does not support getbundle, this actually is a list of
36 roots of the nodes that would be incoming, to be supplied to
36 roots of the nodes that would be incoming, to be supplied to
37 changegroupsubset. No code except for pull should be relying on this fact
37 changegroupsubset. No code except for pull should be relying on this fact
38 any longer.
38 any longer.
39 "heads" is either the supplied heads, or else the remote's heads.
39 "heads" is either the supplied heads, or else the remote's heads.
40
40
41 If you pass heads and they are all known locally, the response lists just
41 If you pass heads and they are all known locally, the response lists just
42 these heads in "common" and in "heads".
42 these heads in "common" and in "heads".
43
43
44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 extensions a good hook into outgoing.
45 extensions a good hook into outgoing.
46 """
46 """
47
47
48 if not remote.capable('getbundle'):
48 if not remote.capable('getbundle'):
49 return treediscovery.findcommonincoming(repo, remote, heads, force)
49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50
50
51 if heads:
51 if heads:
52 allknown = True
52 allknown = True
53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 for h in heads:
54 for h in heads:
55 if not knownnode(h):
55 if not knownnode(h):
56 allknown = False
56 allknown = False
57 break
57 break
58 if allknown:
58 if allknown:
59 return (heads, False, heads)
59 return (heads, False, heads)
60
60
61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 abortwhenunrelated=not force)
62 abortwhenunrelated=not force)
63 common, anyinc, srvheads = res
63 common, anyinc, srvheads = res
64 return (list(common), anyinc, heads or list(srvheads))
64 return (list(common), anyinc, heads or list(srvheads))
65
65
66 class outgoing(object):
66 class outgoing(object):
67 '''Represents the set of nodes present in a local repo but not in a
67 '''Represents the set of nodes present in a local repo but not in a
68 (possibly) remote one.
68 (possibly) remote one.
69
69
70 Members:
70 Members:
71
71
72 missing is a list of all nodes present in local but not in remote.
72 missing is a list of all nodes present in local but not in remote.
73 common is a list of all nodes shared between the two repos.
73 common is a list of all nodes shared between the two repos.
74 excluded is the list of missing changeset that shouldn't be sent remotely.
74 excluded is the list of missing changeset that shouldn't be sent remotely.
75 missingheads is the list of heads of missing.
75 missingheads is the list of heads of missing.
76 commonheads is the list of heads of common.
76 commonheads is the list of heads of common.
77
77
78 The sets are computed on demand from the heads, unless provided upfront
78 The sets are computed on demand from the heads, unless provided upfront
79 by discovery.'''
79 by discovery.'''
80
80
81 def __init__(self, repo, commonheads=None, missingheads=None,
81 def __init__(self, repo, commonheads=None, missingheads=None,
82 missingroots=None):
82 missingroots=None):
83 # at least one of them must not be set
83 # at least one of them must not be set
84 assert None in (commonheads, missingroots)
84 assert None in (commonheads, missingroots)
85 cl = repo.changelog
85 cl = repo.changelog
86 if missingheads is None:
86 if missingheads is None:
87 missingheads = cl.heads()
87 missingheads = cl.heads()
88 if missingroots:
88 if missingroots:
89 discbases = []
89 discbases = []
90 for n in missingroots:
90 for n in missingroots:
91 discbases.extend([p for p in cl.parents(n) if p != nullid])
91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 # TODO remove call to nodesbetween.
92 # TODO remove call to nodesbetween.
93 # TODO populate attributes on outgoing instance instead of setting
93 # TODO populate attributes on outgoing instance instead of setting
94 # discbases.
94 # discbases.
95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 included = set(csets)
96 included = set(csets)
97 missingheads = heads
97 missingheads = heads
98 commonheads = [n for n in discbases if n not in included]
98 commonheads = [n for n in discbases if n not in included]
99 elif not commonheads:
99 elif not commonheads:
100 commonheads = [nullid]
100 commonheads = [nullid]
101 self.commonheads = commonheads
101 self.commonheads = commonheads
102 self.missingheads = missingheads
102 self.missingheads = missingheads
103 self._revlog = cl
103 self._revlog = cl
104 self._common = None
104 self._common = None
105 self._missing = None
105 self._missing = None
106 self.excluded = []
106 self.excluded = []
107
107
108 def _computecommonmissing(self):
108 def _computecommonmissing(self):
109 sets = self._revlog.findcommonmissing(self.commonheads,
109 sets = self._revlog.findcommonmissing(self.commonheads,
110 self.missingheads)
110 self.missingheads)
111 self._common, self._missing = sets
111 self._common, self._missing = sets
112
112
113 @util.propertycache
113 @util.propertycache
114 def common(self):
114 def common(self):
115 if self._common is None:
115 if self._common is None:
116 self._computecommonmissing()
116 self._computecommonmissing()
117 return self._common
117 return self._common
118
118
119 @util.propertycache
119 @util.propertycache
120 def missing(self):
120 def missing(self):
121 if self._missing is None:
121 if self._missing is None:
122 self._computecommonmissing()
122 self._computecommonmissing()
123 return self._missing
123 return self._missing
124
124
125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 commoninc=None, portable=False):
126 commoninc=None, portable=False):
127 '''Return an outgoing instance to identify the nodes present in repo but
127 '''Return an outgoing instance to identify the nodes present in repo but
128 not in other.
128 not in other.
129
129
130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 (inclusive) are included. If you already know the local repo's heads,
131 (inclusive) are included. If you already know the local repo's heads,
132 passing them in onlyheads is faster than letting them be recomputed here.
132 passing them in onlyheads is faster than letting them be recomputed here.
133
133
134 If commoninc is given, it must be the result of a prior call to
134 If commoninc is given, it must be the result of a prior call to
135 findcommonincoming(repo, other, force) to avoid recomputing it here.
135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136
136
137 If portable is given, compute more conservative common and missingheads,
137 If portable is given, compute more conservative common and missingheads,
138 to make bundles created from the instance more portable.'''
138 to make bundles created from the instance more portable.'''
139 # declare an empty outgoing object to be filled later
139 # declare an empty outgoing object to be filled later
140 og = outgoing(repo, None, None)
140 og = outgoing(repo, None, None)
141
141
142 # get common set if not provided
142 # get common set if not provided
143 if commoninc is None:
143 if commoninc is None:
144 commoninc = findcommonincoming(repo, other, force=force)
144 commoninc = findcommonincoming(repo, other, force=force)
145 og.commonheads, _any, _hds = commoninc
145 og.commonheads, _any, _hds = commoninc
146
146
147 # compute outgoing
147 # compute outgoing
148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 if not mayexclude:
149 if not mayexclude:
150 og.missingheads = onlyheads or repo.heads()
150 og.missingheads = onlyheads or repo.heads()
151 elif onlyheads is None:
151 elif onlyheads is None:
152 # use visible heads as it should be cached
152 # use visible heads as it should be cached
153 og.missingheads = repo.filtered("served").heads()
153 og.missingheads = repo.filtered("served").heads()
154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 else:
155 else:
156 # compute common, missing and exclude secret stuff
156 # compute common, missing and exclude secret stuff
157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 og._common, allmissing = sets
158 og._common, allmissing = sets
159 og._missing = missing = []
159 og._missing = missing = []
160 og.excluded = excluded = []
160 og.excluded = excluded = []
161 for node in allmissing:
161 for node in allmissing:
162 ctx = repo[node]
162 ctx = repo[node]
163 if ctx.phase() >= phases.secret or ctx.extinct():
163 if ctx.phase() >= phases.secret or ctx.extinct():
164 excluded.append(node)
164 excluded.append(node)
165 else:
165 else:
166 missing.append(node)
166 missing.append(node)
167 if len(missing) == len(allmissing):
167 if len(missing) == len(allmissing):
168 missingheads = onlyheads
168 missingheads = onlyheads
169 else: # update missing heads
169 else: # update missing heads
170 missingheads = phases.newheads(repo, onlyheads, excluded)
170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 og.missingheads = missingheads
171 og.missingheads = missingheads
172 if portable:
172 if portable:
173 # recompute common and missingheads as if -r<rev> had been given for
173 # recompute common and missingheads as if -r<rev> had been given for
174 # each head of missing, and --base <rev> for each head of the proper
174 # each head of missing, and --base <rev> for each head of the proper
175 # ancestors of missing
175 # ancestors of missing
176 og._computecommonmissing()
176 og._computecommonmissing()
177 cl = repo.changelog
177 cl = repo.changelog
178 missingrevs = set(cl.rev(n) for n in og._missing)
178 missingrevs = set(cl.rev(n) for n in og._missing)
179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 commonheads = set(og.commonheads)
180 commonheads = set(og.commonheads)
181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182
182
183 return og
183 return og
184
184
185 def _headssummary(repo, remote, outgoing):
185 def _headssummary(repo, remote, outgoing):
186 """compute a summary of branch and heads status before and after push
186 """compute a summary of branch and heads status before and after push
187
187
188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
189
189
190 - branch: the branch name
190 - branch: the branch name
191 - remoteheads: the list of remote heads known locally
191 - remoteheads: the list of remote heads known locally
192 None if the branch is new
192 None if the branch is new
193 - newheads: the new remote heads (known locally) with outgoing pushed
193 - newheads: the new remote heads (known locally) with outgoing pushed
194 - unsyncedheads: the list of remote heads unknown locally.
194 - unsyncedheads: the list of remote heads unknown locally.
195 """
195 """
196 cl = repo.changelog
196 cl = repo.changelog
197 headssum = {}
197 headssum = {}
198 # A. Create set of branches involved in the push.
198 # A. Create set of branches involved in the push.
199 branches = set(repo[n].branch() for n in outgoing.missing)
199 branches = set(repo[n].branch() for n in outgoing.missing)
200 remotemap = remote.branchmap()
200 remotemap = remote.branchmap()
201 newbranches = branches - set(remotemap)
201 newbranches = branches - set(remotemap)
202 branches.difference_update(newbranches)
202 branches.difference_update(newbranches)
203
203
204 # A. register remote heads
204 # A. register remote heads
205 remotebranches = set()
205 remotebranches = set()
206 for branch, heads in remote.branchmap().iteritems():
206 for branch, heads in remote.branchmap().iteritems():
207 remotebranches.add(branch)
207 remotebranches.add(branch)
208 known = []
208 known = []
209 unsynced = []
209 unsynced = []
210 knownnode = cl.hasnode # do not use nodemap until it is filtered
210 knownnode = cl.hasnode # do not use nodemap until it is filtered
211 for h in heads:
211 for h in heads:
212 if knownnode(h):
212 if knownnode(h):
213 known.append(h)
213 known.append(h)
214 else:
214 else:
215 unsynced.append(h)
215 unsynced.append(h)
216 headssum[branch] = (known, list(known), unsynced)
216 headssum[branch] = (known, list(known), unsynced)
217 # B. add new branch data
217 # B. add new branch data
218 missingctx = list(repo[n] for n in outgoing.missing)
218 missingctx = list(repo[n] for n in outgoing.missing)
219 touchedbranches = set()
219 touchedbranches = set()
220 for ctx in missingctx:
220 for ctx in missingctx:
221 branch = ctx.branch()
221 branch = ctx.branch()
222 touchedbranches.add(branch)
222 touchedbranches.add(branch)
223 if branch not in headssum:
223 if branch not in headssum:
224 headssum[branch] = (None, [], [])
224 headssum[branch] = (None, [], [])
225
225
226 # C drop data about untouched branches:
226 # C drop data about untouched branches:
227 for branch in remotebranches - touchedbranches:
227 for branch in remotebranches - touchedbranches:
228 del headssum[branch]
228 del headssum[branch]
229
229
230 # D. Update newmap with outgoing changes.
230 # D. Update newmap with outgoing changes.
231 # This will possibly add new heads and remove existing ones.
231 # This will possibly add new heads and remove existing ones.
232 newmap = branchmap.branchcache((branch, heads[1])
232 newmap = branchmap.branchcache((branch, heads[1])
233 for branch, heads in headssum.iteritems()
233 for branch, heads in headssum.iteritems()
234 if heads[0] is not None)
234 if heads[0] is not None)
235 newmap.update(repo, (ctx.rev() for ctx in missingctx))
235 newmap.update(repo, (ctx.rev() for ctx in missingctx))
236 for branch, newheads in newmap.iteritems():
236 for branch, newheads in newmap.iteritems():
237 headssum[branch][1][:] = newheads
237 headssum[branch][1][:] = newheads
238 for branch, items in headssum.iteritems():
238 for branch, items in headssum.iteritems():
239 for l in items:
239 for l in items:
240 if l is not None:
240 if l is not None:
241 l.sort()
241 l.sort()
242 return headssum
242 return headssum
243
243
244 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
244 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
245 """Compute branchmapsummary for repo without branchmap support"""
245 """Compute branchmapsummary for repo without branchmap support"""
246
246
247 # 1-4b. old servers: Check for new topological heads.
247 # 1-4b. old servers: Check for new topological heads.
248 # Construct {old,new}map with branch = None (topological branch).
248 # Construct {old,new}map with branch = None (topological branch).
249 # (code based on update)
249 # (code based on update)
250 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
250 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
251 oldheads = sorted(h for h in remoteheads if knownnode(h))
251 oldheads = sorted(h for h in remoteheads if knownnode(h))
252 # all nodes in outgoing.missing are children of either:
252 # all nodes in outgoing.missing are children of either:
253 # - an element of oldheads
253 # - an element of oldheads
254 # - another element of outgoing.missing
254 # - another element of outgoing.missing
255 # - nullrev
255 # - nullrev
256 # This explains why the new head are very simple to compute.
256 # This explains why the new head are very simple to compute.
257 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
257 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
258 newheads = sorted(c.node() for c in r)
258 newheads = sorted(c.node() for c in r)
259 # set some unsynced head to issue the "unsynced changes" warning
259 # set some unsynced head to issue the "unsynced changes" warning
260 if inc:
260 if inc:
261 unsynced = [None]
261 unsynced = [None]
262 else:
262 else:
263 unsynced = []
263 unsynced = []
264 return {None: (oldheads, newheads, unsynced)}
264 return {None: (oldheads, newheads, unsynced)}
265
265
266 def _nowarnheads(pushop):
266 def _nowarnheads(pushop):
267 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
267 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
268 repo = pushop.repo.unfiltered()
268 repo = pushop.repo.unfiltered()
269 remote = pushop.remote
269 remote = pushop.remote
270 localbookmarks = repo._bookmarks
270 localbookmarks = repo._bookmarks
271 remotebookmarks = remote.listkeys('bookmarks')
271 remotebookmarks = remote.listkeys('bookmarks')
272 bookmarkedheads = set()
272 bookmarkedheads = set()
273
273
274 # internal config: bookmarks.pushing
274 # internal config: bookmarks.pushing
275 newbookmarks = [localbookmarks.expandname(b)
275 newbookmarks = [localbookmarks.expandname(b)
276 for b in pushop.ui.configlist('bookmarks', 'pushing')]
276 for b in pushop.ui.configlist('bookmarks', 'pushing')]
277
277
278 for bm in localbookmarks:
278 for bm in localbookmarks:
279 rnode = remotebookmarks.get(bm)
279 rnode = remotebookmarks.get(bm)
280 if rnode and rnode in repo:
280 if rnode and rnode in repo:
281 lctx, rctx = repo[bm], repo[rnode]
281 lctx, rctx = repo[bm], repo[rnode]
282 if bookmarks.validdest(repo, rctx, lctx):
282 if bookmarks.validdest(repo, rctx, lctx):
283 bookmarkedheads.add(lctx.node())
283 bookmarkedheads.add(lctx.node())
284 else:
284 else:
285 if bm in newbookmarks and bm not in remotebookmarks:
285 if bm in newbookmarks and bm not in remotebookmarks:
286 bookmarkedheads.add(repo[bm].node())
286 bookmarkedheads.add(repo[bm].node())
287
287
288 return bookmarkedheads
288 return bookmarkedheads
289
289
290 def checkheads(pushop):
290 def checkheads(pushop):
291 """Check that a push won't add any outgoing head
291 """Check that a push won't add any outgoing head
292
292
293 raise Abort error and display ui message as needed.
293 raise Abort error and display ui message as needed.
294 """
294 """
295
295
296 repo = pushop.repo.unfiltered()
296 repo = pushop.repo.unfiltered()
297 remote = pushop.remote
297 remote = pushop.remote
298 outgoing = pushop.outgoing
298 outgoing = pushop.outgoing
299 remoteheads = pushop.remoteheads
299 remoteheads = pushop.remoteheads
300 newbranch = pushop.newbranch
300 newbranch = pushop.newbranch
301 inc = bool(pushop.incoming)
301 inc = bool(pushop.incoming)
302
302
303 # Check for each named branch if we're creating new remote heads.
303 # Check for each named branch if we're creating new remote heads.
304 # To be a remote head after push, node must be either:
304 # To be a remote head after push, node must be either:
305 # - unknown locally
305 # - unknown locally
306 # - a local outgoing head descended from update
306 # - a local outgoing head descended from update
307 # - a remote head that's known locally and not
307 # - a remote head that's known locally and not
308 # ancestral to an outgoing head
308 # ancestral to an outgoing head
309 if remoteheads == [nullid]:
309 if remoteheads == [nullid]:
310 # remote is empty, nothing to check.
310 # remote is empty, nothing to check.
311 return
311 return
312
312
313 if remote.capable('branchmap'):
313 if remote.capable('branchmap'):
314 headssum = _headssummary(repo, remote, outgoing)
314 headssum = _headssummary(repo, remote, outgoing)
315 else:
315 else:
316 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
316 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
317 newbranches = [branch for branch, heads in headssum.iteritems()
317 newbranches = [branch for branch, heads in headssum.iteritems()
318 if heads[0] is None]
318 if heads[0] is None]
319 # 1. Check for new branches on the remote.
319 # 1. Check for new branches on the remote.
320 if newbranches and not newbranch: # new branch requires --new-branch
320 if newbranches and not newbranch: # new branch requires --new-branch
321 branchnames = ', '.join(sorted(newbranches))
321 branchnames = ', '.join(sorted(newbranches))
322 raise error.Abort(_("push creates new remote branches: %s!")
322 raise error.Abort(_("push creates new remote branches: %s!")
323 % branchnames,
323 % branchnames,
324 hint=_("use 'hg push --new-branch' to create"
324 hint=_("use 'hg push --new-branch' to create"
325 " new remote branches"))
325 " new remote branches"))
326
326
327 # 2. Find heads that we need not warn about
327 # 2. Find heads that we need not warn about
328 nowarnheads = _nowarnheads(pushop)
328 nowarnheads = _nowarnheads(pushop)
329
329
330 # 3. Check for new heads.
330 # 3. Check for new heads.
331 # If there are more heads after the push than before, a suitable
331 # If there are more heads after the push than before, a suitable
332 # error message, depending on unsynced status, is displayed.
332 # error message, depending on unsynced status, is displayed.
333 errormsg = None
333 errormsg = None
334 # If there is no obsstore, allfuturecommon won't be used, so no
334 # If there is no obsstore, allfuturecommon won't be used, so no
335 # need to compute it.
335 # need to compute it.
336 if repo.obsstore:
336 if repo.obsstore:
337 allmissing = set(outgoing.missing)
337 allmissing = set(outgoing.missing)
338 cctx = repo.set('%ld', outgoing.common)
338 cctx = repo.set('%ld', outgoing.common)
339 allfuturecommon = set(c.node() for c in cctx)
339 allfuturecommon = set(c.node() for c in cctx)
340 allfuturecommon.update(allmissing)
340 allfuturecommon.update(allmissing)
341 for branch, heads in sorted(headssum.iteritems()):
341 for branch, heads in sorted(headssum.iteritems()):
342 remoteheads, newheads, unsyncedheads = heads
342 remoteheads, newheads, unsyncedheads = heads
343 # add unsynced data
343 # add unsynced data
344 if remoteheads is None:
344 if remoteheads is None:
345 oldhs = set()
345 oldhs = set()
346 else:
346 else:
347 oldhs = set(remoteheads)
347 oldhs = set(remoteheads)
348 oldhs.update(unsyncedheads)
348 oldhs.update(unsyncedheads)
349 dhs = None # delta heads, the new heads on branch
349 dhs = None # delta heads, the new heads on branch
350 if not repo.obsstore:
350 if repo.obsstore:
351 discardedheads = set()
351 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
352 newhs = set(newheads)
352 newheads = sorted(result[0])
353 else:
353 newhs = set(newheads)
354 newhs, discardedheads = _postprocessobsolete(pushop,
355 allfuturecommon,
356 newheads)
357 newhs.update(unsyncedheads)
354 newhs.update(unsyncedheads)
358 if unsyncedheads:
355 if unsyncedheads:
359 if None in unsyncedheads:
356 if None in unsyncedheads:
360 # old remote, no heads data
357 # old remote, no heads data
361 heads = None
358 heads = None
362 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
359 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
363 heads = ' '.join(short(h) for h in unsyncedheads)
360 heads = ' '.join(short(h) for h in unsyncedheads)
364 else:
361 else:
365 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
362 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
366 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
363 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
367 if heads is None:
364 if heads is None:
368 repo.ui.status(_("remote has heads that are "
365 repo.ui.status(_("remote has heads that are "
369 "not known locally\n"))
366 "not known locally\n"))
370 elif branch is None:
367 elif branch is None:
371 repo.ui.status(_("remote has heads that are "
368 repo.ui.status(_("remote has heads that are "
372 "not known locally: %s\n") % heads)
369 "not known locally: %s\n") % heads)
373 else:
370 else:
374 repo.ui.status(_("remote has heads on branch '%s' that are "
371 repo.ui.status(_("remote has heads on branch '%s' that are "
375 "not known locally: %s\n") % (branch, heads))
372 "not known locally: %s\n") % (branch, heads))
376 if remoteheads is None:
373 if remoteheads is None:
377 if len(newhs) > 1:
374 if len(newhs) > 1:
378 dhs = list(newhs)
375 dhs = list(newhs)
379 if errormsg is None:
376 if errormsg is None:
380 errormsg = (_("push creates new branch '%s' "
377 errormsg = (_("push creates new branch '%s' "
381 "with multiple heads") % (branch))
378 "with multiple heads") % (branch))
382 hint = _("merge or"
379 hint = _("merge or"
383 " see 'hg help push' for details about"
380 " see 'hg help push' for details about"
384 " pushing new heads")
381 " pushing new heads")
385 elif len(newhs) > len(oldhs):
382 elif len(newhs) > len(oldhs):
386 # remove bookmarked or existing remote heads from the new heads list
383 # remove bookmarked or existing remote heads from the new heads list
387 dhs = sorted(newhs - nowarnheads - oldhs)
384 dhs = sorted(newhs - nowarnheads - oldhs)
388 if dhs:
385 if dhs:
389 if errormsg is None:
386 if errormsg is None:
390 if branch not in ('default', None):
387 if branch not in ('default', None):
391 errormsg = _("push creates new remote head %s "
388 errormsg = _("push creates new remote head %s "
392 "on branch '%s'!") % (short(dhs[0]), branch)
389 "on branch '%s'!") % (short(dhs[0]), branch)
393 elif repo[dhs[0]].bookmarks():
390 elif repo[dhs[0]].bookmarks():
394 errormsg = _("push creates new remote head %s "
391 errormsg = _("push creates new remote head %s "
395 "with bookmark '%s'!") % (
392 "with bookmark '%s'!") % (
396 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
393 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
397 else:
394 else:
398 errormsg = _("push creates new remote head %s!"
395 errormsg = _("push creates new remote head %s!"
399 ) % short(dhs[0])
396 ) % short(dhs[0])
400 if unsyncedheads:
397 if unsyncedheads:
401 hint = _("pull and merge or"
398 hint = _("pull and merge or"
402 " see 'hg help push' for details about"
399 " see 'hg help push' for details about"
403 " pushing new heads")
400 " pushing new heads")
404 else:
401 else:
405 hint = _("merge or"
402 hint = _("merge or"
406 " see 'hg help push' for details about"
403 " see 'hg help push' for details about"
407 " pushing new heads")
404 " pushing new heads")
408 if branch is None:
405 if branch is None:
409 repo.ui.note(_("new remote heads:\n"))
406 repo.ui.note(_("new remote heads:\n"))
410 else:
407 else:
411 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
408 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
412 for h in dhs:
409 for h in dhs:
413 repo.ui.note((" %s\n") % short(h))
410 repo.ui.note((" %s\n") % short(h))
414 if errormsg:
411 if errormsg:
415 raise error.Abort(errormsg, hint=hint)
412 raise error.Abort(errormsg, hint=hint)
416
413
417 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
414 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
418 """post process the list of new heads with obsolescence information
415 """post process the list of new heads with obsolescence information
419
416
420 Exists as a sub-function to contain the complexity and allow extensions to
417 Exists as a sub-function to contain the complexity and allow extensions to
421 experiment with smarter logic.
418 experiment with smarter logic.
422
419
423 Returns (newheads, discarded_heads) tuple
420 Returns (newheads, discarded_heads) tuple
424 """
421 """
425 # known issue
422 # known issue
426 #
423 #
427 # * We "silently" skip processing on all changeset unknown locally
424 # * We "silently" skip processing on all changeset unknown locally
428 #
425 #
429 # * if <nh> is public on the remote, it won't be affected by obsolete
426 # * if <nh> is public on the remote, it won't be affected by obsolete
430 # marker and a new is created
427 # marker and a new is created
431
428
432 # define various utilities and containers
429 # define various utilities and containers
433 repo = pushop.repo
430 repo = pushop.repo
434 unfi = repo.unfiltered()
431 unfi = repo.unfiltered()
435 tonode = unfi.changelog.node
432 tonode = unfi.changelog.node
436 torev = unfi.changelog.rev
433 torev = unfi.changelog.rev
437 public = phases.public
434 public = phases.public
438 getphase = unfi._phasecache.phase
435 getphase = unfi._phasecache.phase
439 ispublic = (lambda r: getphase(unfi, r) == public)
436 ispublic = (lambda r: getphase(unfi, r) == public)
440 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
437 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
441 futurecommon)
438 futurecommon)
442 successorsmarkers = unfi.obsstore.successors
439 successorsmarkers = unfi.obsstore.successors
443 newhs = set() # final set of new heads
440 newhs = set() # final set of new heads
444 discarded = set() # new head of fully replaced branch
441 discarded = set() # new head of fully replaced branch
445
442
446 localcandidate = set() # candidate heads known locally
443 localcandidate = set() # candidate heads known locally
447 unknownheads = set() # candidate heads unknown locally
444 unknownheads = set() # candidate heads unknown locally
448 for h in candidate_newhs:
445 for h in candidate_newhs:
449 if h in unfi:
446 if h in unfi:
450 localcandidate.add(h)
447 localcandidate.add(h)
451 else:
448 else:
452 if successorsmarkers.get(h) is not None:
449 if successorsmarkers.get(h) is not None:
453 msg = ('checkheads: remote head unknown locally has'
450 msg = ('checkheads: remote head unknown locally has'
454 ' local marker: %s\n')
451 ' local marker: %s\n')
455 repo.ui.debug(msg % hex(h))
452 repo.ui.debug(msg % hex(h))
456 unknownheads.add(h)
453 unknownheads.add(h)
457
454
458 # fast path the simple case
455 # fast path the simple case
459 if len(localcandidate) == 1:
456 if len(localcandidate) == 1:
460 return unknownheads | set(candidate_newhs), set()
457 return unknownheads | set(candidate_newhs), set()
461
458
462 # actually process branch replacement
459 # actually process branch replacement
463 while localcandidate:
460 while localcandidate:
464 nh = localcandidate.pop()
461 nh = localcandidate.pop()
465 # run this check early to skip the evaluation of the whole branch
462 # run this check early to skip the evaluation of the whole branch
466 if (nh in futurecommon or ispublic(torev(nh))):
463 if (nh in futurecommon or ispublic(torev(nh))):
467 newhs.add(nh)
464 newhs.add(nh)
468 continue
465 continue
469
466
470 # Get all revs/nodes on the branch exclusive to this head
467 # Get all revs/nodes on the branch exclusive to this head
471 # (already filtered heads are "ignored"))
468 # (already filtered heads are "ignored"))
472 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
469 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
473 nh, localcandidate, newhs)
470 nh, localcandidate, newhs)
474 branchnodes = [tonode(r) for r in branchrevs]
471 branchnodes = [tonode(r) for r in branchrevs]
475
472
476 # The branch won't be hidden on the remote if
473 # The branch won't be hidden on the remote if
477 # * any part of it is public,
474 # * any part of it is public,
478 # * any part of it is considered part of the result by previous logic,
475 # * any part of it is considered part of the result by previous logic,
479 # * if we have no markers to push to obsolete it.
476 # * if we have no markers to push to obsolete it.
480 if (any(ispublic(r) for r in branchrevs)
477 if (any(ispublic(r) for r in branchrevs)
481 or any(n in futurecommon for n in branchnodes)
478 or any(n in futurecommon for n in branchnodes)
482 or any(not hasoutmarker(n) for n in branchnodes)):
479 or any(not hasoutmarker(n) for n in branchnodes)):
483 newhs.add(nh)
480 newhs.add(nh)
484 else:
481 else:
485 # note: there is a corner case if there is a merge in the branch.
482 # note: there is a corner case if there is a merge in the branch.
486 # we might end up with -more- heads. However, these heads are not
483 # we might end up with -more- heads. However, these heads are not
487 # "added" by the push, but more by the "removal" on the remote so I
484 # "added" by the push, but more by the "removal" on the remote so I
488 # think is a okay to ignore them,
485 # think is a okay to ignore them,
489 discarded.add(nh)
486 discarded.add(nh)
490 newhs |= unknownheads
487 newhs |= unknownheads
491 return newhs, discarded
488 return newhs, discarded
492
489
def pushingmarkerfor(obsstore, pushset, node):
    """true if some markers are to be pushed for node

    Walk the obsolescence-marker graph from ``node`` — following successors,
    or parents for prune markers — and return True as soon as any reachable
    changeset belongs to ``pushset``, False once the reachable set is
    exhausted.

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    getmarkers = obsstore.successors.get
    pending = [node]
    visited = set(pending)
    while pending:
        cand = pending.pop()
        if cand in pushset:
            return True
        # marker tuple layout:
        # ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in getmarkers(cand, ()):
            follow = marker[1]  # successors
            if not follow:
                # prune marker: keep walking through the parents instead
                follow = marker[5] or ()
            for nxt in follow:
                if nxt not in visited:
                    visited.add(nxt)
                    pending.append(nxt)
    return False
518 return False
General Comments 0
You need to be logged in to leave comments. Login now