##// END OF EJS Templates
discovery: also use lists for the returns of '_oldheadssummary'...
marmoute -
r32671:81cbfaea default
parent child Browse files
Show More
@@ -1,519 +1,519 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 setdiscovery,
24 setdiscovery,
25 treediscovery,
25 treediscovery,
26 util,
26 util,
27 )
27 )
28
28
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    # Servers lacking 'getbundle' only speak the old tree-walking protocol.
    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        # no nodemap until it is filtered
        knownnode = repo.changelog.hasnode
        if all(knownnode(h) for h in heads):
            # every requested head exists locally: nothing can be incoming
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui, repo, remote, abortwhenunrelated=not force)
    return (list(common), anyinc, heads or list(srvheads))
65
65
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changeset that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, repo, commonheads=None, missingheads=None,
                 missingroots=None):
        # at least one of commonheads/missingroots must be left unset
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            # common heads are the non-null parents of the roots that fall
            # outside the missing range itself
            discbases = []
            for node in missingroots:
                discbases.extend(p for p in cl.parents(node) if p != nullid)
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            included = set(csets)
            missingheads = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = cl
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # one revlog call fills both caches at once
        common, missing = self._revlog.findcommonmissing(self.commonheads,
                                                         self.missingheads)
        self._common = common
        self._missing = missing

    @util.propertycache
    def common(self):
        """All nodes shared between the two repos, computed lazily."""
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        """All nodes present locally but not remotely, computed lazily."""
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
124
124
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    # secret changesets or obsolescence markers may force us to exclude
    # some of the missing changesets from what is actually sent
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        # note: 'missing' and 'excluded' alias og's attributes so the loop
        # below fills the outgoing instance in place
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # nothing was excluded, the caller-supplied heads are still valid
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og
184
184
def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # A. register remote heads
    # (split every remote head into locally-known vs unsynced)
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        knownnode = cl.hasnode # do not use nodemap until it is filtered
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        # the middle list is a copy: step D rewrites it in place
        headssum[branch] = (known, list(known), unsynced)
    # B. add new branch data
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            # None marks a branch that does not yet exist remotely
            headssum[branch] = (None, [], [])

    # C drop data about untouched branches:
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.branchcache((branch, heads[1])
                                   for branch, heads in headssum.iteritems()
                                   if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        # slice-assign so the tuple stored in headssum sees the update
        headssum[branch][1][:] = newheads
    return headssum
239
239
240 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
240 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
241 """Compute branchmapsummary for repo without branchmap support"""
241 """Compute branchmapsummary for repo without branchmap support"""
242
242
243 # 1-4b. old servers: Check for new topological heads.
243 # 1-4b. old servers: Check for new topological heads.
244 # Construct {old,new}map with branch = None (topological branch).
244 # Construct {old,new}map with branch = None (topological branch).
245 # (code based on update)
245 # (code based on update)
246 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
246 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
247 oldheads = set(h for h in remoteheads if knownnode(h))
247 oldheads = list(h for h in remoteheads if knownnode(h))
248 # all nodes in outgoing.missing are children of either:
248 # all nodes in outgoing.missing are children of either:
249 # - an element of oldheads
249 # - an element of oldheads
250 # - another element of outgoing.missing
250 # - another element of outgoing.missing
251 # - nullrev
251 # - nullrev
252 # This explains why the new head are very simple to compute.
252 # This explains why the new head are very simple to compute.
253 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
253 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
254 newheads = list(c.node() for c in r)
254 newheads = list(c.node() for c in r)
255 # set some unsynced head to issue the "unsynced changes" warning
255 # set some unsynced head to issue the "unsynced changes" warning
256 if inc:
256 if inc:
257 unsynced = {None}
257 unsynced = [None]
258 else:
258 else:
259 unsynced = set()
259 unsynced = []
260 return {None: (oldheads, newheads, unsynced)}
260 return {None: (oldheads, newheads, unsynced)}
261
261
262 def _nowarnheads(pushop):
262 def _nowarnheads(pushop):
263 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
263 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
264 repo = pushop.repo.unfiltered()
264 repo = pushop.repo.unfiltered()
265 remote = pushop.remote
265 remote = pushop.remote
266 localbookmarks = repo._bookmarks
266 localbookmarks = repo._bookmarks
267 remotebookmarks = remote.listkeys('bookmarks')
267 remotebookmarks = remote.listkeys('bookmarks')
268 bookmarkedheads = set()
268 bookmarkedheads = set()
269
269
270 # internal config: bookmarks.pushing
270 # internal config: bookmarks.pushing
271 newbookmarks = [localbookmarks.expandname(b)
271 newbookmarks = [localbookmarks.expandname(b)
272 for b in pushop.ui.configlist('bookmarks', 'pushing')]
272 for b in pushop.ui.configlist('bookmarks', 'pushing')]
273
273
274 for bm in localbookmarks:
274 for bm in localbookmarks:
275 rnode = remotebookmarks.get(bm)
275 rnode = remotebookmarks.get(bm)
276 if rnode and rnode in repo:
276 if rnode and rnode in repo:
277 lctx, rctx = repo[bm], repo[rnode]
277 lctx, rctx = repo[bm], repo[rnode]
278 if bookmarks.validdest(repo, rctx, lctx):
278 if bookmarks.validdest(repo, rctx, lctx):
279 bookmarkedheads.add(lctx.node())
279 bookmarkedheads.add(lctx.node())
280 else:
280 else:
281 if bm in newbookmarks and bm not in remotebookmarks:
281 if bm in newbookmarks and bm not in remotebookmarks:
282 bookmarkedheads.add(repo[bm].node())
282 bookmarkedheads.add(repo[bm].node())
283
283
284 return bookmarkedheads
284 return bookmarkedheads
285
285
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    # a None remoteheads entry marks a branch that does not exist remotely
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise error.Abort(_("push creates new remote branches: %s!")
                           % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    # If there is no obsstore, allfuturecommon won't be used, so no
    # need to compute it.
    if repo.obsstore:
        allmissing = set(outgoing.missing)
        cctx = repo.set('%ld', outgoing.common)
        allfuturecommon = set(c.node() for c in cctx)
        allfuturecommon.update(allmissing)
    for branch, heads in sorted(headssum.iteritems()):
        # NOTE: this deliberately rebinds 'remoteheads' and, further down,
        # 'heads' for message formatting; the original values are not needed
        # past this point.
        remoteheads, newheads, unsyncedheads = heads
        candidate_newhs = set(newheads)
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        candidate_newhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        if not repo.obsstore:
            discardedheads = set()
            newhs = candidate_newhs
        else:
            # filter out candidate heads that obsolescence markers being
            # pushed will make disappear on the remote
            newhs, discardedheads = _postprocessobsolete(pushop,
                                                         allfuturecommon,
                                                         candidate_newhs)
        unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
        if unsynced:
            if None in unsynced:
                # old remote, no heads data
                heads = None
            elif len(unsynced) <= 4 or repo.ui.verbose:
                heads = ' '.join(short(h) for h in unsynced)
            else:
                # truncate the list of displayed heads unless --verbose
                heads = (' '.join(short(h) for h in unsynced[:4]) +
                         ' ' + _("and %s others") % (len(unsynced) - 4))
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            # new branch: any second head is one head too many
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            # 'hint' is always assigned together with 'errormsg', so the
            # final raise below can rely on it being bound
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
414
414
415 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
415 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
416 """post process the list of new heads with obsolescence information
416 """post process the list of new heads with obsolescence information
417
417
418 Exists as a sub-function to contain the complexity and allow extensions to
418 Exists as a sub-function to contain the complexity and allow extensions to
419 experiment with smarter logic.
419 experiment with smarter logic.
420
420
421 Returns (newheads, discarded_heads) tuple
421 Returns (newheads, discarded_heads) tuple
422 """
422 """
423 # known issue
423 # known issue
424 #
424 #
425 # * We "silently" skip processing on all changeset unknown locally
425 # * We "silently" skip processing on all changeset unknown locally
426 #
426 #
427 # * if <nh> is public on the remote, it won't be affected by obsolete
427 # * if <nh> is public on the remote, it won't be affected by obsolete
428 # marker and a new is created
428 # marker and a new is created
429
429
430 # define various utilities and containers
430 # define various utilities and containers
431 repo = pushop.repo
431 repo = pushop.repo
432 unfi = repo.unfiltered()
432 unfi = repo.unfiltered()
433 tonode = unfi.changelog.node
433 tonode = unfi.changelog.node
434 torev = unfi.changelog.rev
434 torev = unfi.changelog.rev
435 public = phases.public
435 public = phases.public
436 getphase = unfi._phasecache.phase
436 getphase = unfi._phasecache.phase
437 ispublic = (lambda r: getphase(unfi, r) == public)
437 ispublic = (lambda r: getphase(unfi, r) == public)
438 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
438 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
439 futurecommon)
439 futurecommon)
440 successorsmarkers = unfi.obsstore.successors
440 successorsmarkers = unfi.obsstore.successors
441 newhs = set() # final set of new heads
441 newhs = set() # final set of new heads
442 discarded = set() # new head of fully replaced branch
442 discarded = set() # new head of fully replaced branch
443
443
444 localcandidate = set() # candidate heads known locally
444 localcandidate = set() # candidate heads known locally
445 unknownheads = set() # candidate heads unknown locally
445 unknownheads = set() # candidate heads unknown locally
446 for h in candidate_newhs:
446 for h in candidate_newhs:
447 if h in unfi:
447 if h in unfi:
448 localcandidate.add(h)
448 localcandidate.add(h)
449 else:
449 else:
450 if successorsmarkers.get(h) is not None:
450 if successorsmarkers.get(h) is not None:
451 msg = ('checkheads: remote head unknown locally has'
451 msg = ('checkheads: remote head unknown locally has'
452 ' local marker: %s\n')
452 ' local marker: %s\n')
453 repo.ui.debug(msg % hex(h))
453 repo.ui.debug(msg % hex(h))
454 unknownheads.add(h)
454 unknownheads.add(h)
455
455
456 # fast path the simple case
456 # fast path the simple case
457 if len(localcandidate) == 1:
457 if len(localcandidate) == 1:
458 return unknownheads | set(candidate_newhs), set()
458 return unknownheads | set(candidate_newhs), set()
459
459
460 # actually process branch replacement
460 # actually process branch replacement
461 while localcandidate:
461 while localcandidate:
462 nh = localcandidate.pop()
462 nh = localcandidate.pop()
463 # run this check early to skip the evaluation of the whole branch
463 # run this check early to skip the evaluation of the whole branch
464 if (nh in futurecommon or ispublic(torev(nh))):
464 if (nh in futurecommon or ispublic(torev(nh))):
465 newhs.add(nh)
465 newhs.add(nh)
466 continue
466 continue
467
467
468 # Get all revs/nodes on the branch exclusive to this head
468 # Get all revs/nodes on the branch exclusive to this head
469 # (already filtered heads are "ignored"))
469 # (already filtered heads are "ignored"))
470 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
470 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
471 nh, localcandidate, newhs)
471 nh, localcandidate, newhs)
472 branchnodes = [tonode(r) for r in branchrevs]
472 branchnodes = [tonode(r) for r in branchrevs]
473
473
474 # The branch won't be hidden on the remote if
474 # The branch won't be hidden on the remote if
475 # * any part of it is public,
475 # * any part of it is public,
476 # * any part of it is considered part of the result by previous logic,
476 # * any part of it is considered part of the result by previous logic,
477 # * if we have no markers to push to obsolete it.
477 # * if we have no markers to push to obsolete it.
478 if (any(ispublic(r) for r in branchrevs)
478 if (any(ispublic(r) for r in branchrevs)
479 or any(n in futurecommon for n in branchnodes)
479 or any(n in futurecommon for n in branchnodes)
480 or any(not hasoutmarker(n) for n in branchnodes)):
480 or any(not hasoutmarker(n) for n in branchnodes)):
481 newhs.add(nh)
481 newhs.add(nh)
482 else:
482 else:
483 # note: there is a corner case if there is a merge in the branch.
483 # note: there is a corner case if there is a merge in the branch.
484 # we might end up with -more- heads. However, these heads are not
484 # we might end up with -more- heads. However, these heads are not
485 # "added" by the push, but more by the "removal" on the remote so I
485 # "added" by the push, but more by the "removal" on the remote so I
486 # think is a okay to ignore them,
486 # think is a okay to ignore them,
487 discarded.add(nh)
487 discarded.add(nh)
488 newhs |= unknownheads
488 newhs |= unknownheads
489 return newhs, discarded
489 return newhs, discarded
490
490
def pushingmarkerfor(obsstore, pushset, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    getmarkers = obsstore.successors.get
    pending = [node]
    visited = set(pending)
    # depth-first walk along successor (or, for prunes, parent) edges,
    # looking for any changeset that is part of the push
    while pending:
        current = pending.pop()
        if current in pushset:
            return True
        # each marker is ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in getmarkers(current, ()):
            targets = marker[1]  # successors
            if not targets:
                # prune marker: follow the recorded parents instead
                targets = marker[5] or ()
            for t in targets:
                if t not in visited:
                    visited.add(t)
                    pending.append(t)
    return False
General Comments 0
You need to be logged in to leave comments. Login now