##// END OF EJS Templates
checkheads: use 'nodemap.get' to convert nodes to revs...
marmoute -
r32790:d4b54687 default
parent child Browse files
Show More
@@ -1,527 +1,527 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 setdiscovery,
24 setdiscovery,
25 treediscovery,
25 treediscovery,
26 util,
26 util,
27 )
27 )
28
28
29 def findcommonincoming(repo, remote, heads=None, force=False):
29 def findcommonincoming(repo, remote, heads=None, force=False):
30 """Return a tuple (common, anyincoming, heads) used to identify the common
30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 subset of nodes between repo and remote.
31 subset of nodes between repo and remote.
32
32
33 "common" is a list of (at least) the heads of the common subset.
33 "common" is a list of (at least) the heads of the common subset.
34 "anyincoming" is testable as a boolean indicating if any nodes are missing
34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 locally. If remote does not support getbundle, this actually is a list of
35 locally. If remote does not support getbundle, this actually is a list of
36 roots of the nodes that would be incoming, to be supplied to
36 roots of the nodes that would be incoming, to be supplied to
37 changegroupsubset. No code except for pull should be relying on this fact
37 changegroupsubset. No code except for pull should be relying on this fact
38 any longer.
38 any longer.
39 "heads" is either the supplied heads, or else the remote's heads.
39 "heads" is either the supplied heads, or else the remote's heads.
40
40
41 If you pass heads and they are all known locally, the response lists just
41 If you pass heads and they are all known locally, the response lists just
42 these heads in "common" and in "heads".
42 these heads in "common" and in "heads".
43
43
44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 extensions a good hook into outgoing.
45 extensions a good hook into outgoing.
46 """
46 """
47
47
48 if not remote.capable('getbundle'):
48 if not remote.capable('getbundle'):
49 return treediscovery.findcommonincoming(repo, remote, heads, force)
49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50
50
51 if heads:
51 if heads:
52 allknown = True
52 allknown = True
53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 for h in heads:
54 for h in heads:
55 if not knownnode(h):
55 if not knownnode(h):
56 allknown = False
56 allknown = False
57 break
57 break
58 if allknown:
58 if allknown:
59 return (heads, False, heads)
59 return (heads, False, heads)
60
60
61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 abortwhenunrelated=not force)
62 abortwhenunrelated=not force)
63 common, anyinc, srvheads = res
63 common, anyinc, srvheads = res
64 return (list(common), anyinc, heads or list(srvheads))
64 return (list(common), anyinc, heads or list(srvheads))
65
65
66 class outgoing(object):
66 class outgoing(object):
67 '''Represents the set of nodes present in a local repo but not in a
67 '''Represents the set of nodes present in a local repo but not in a
68 (possibly) remote one.
68 (possibly) remote one.
69
69
70 Members:
70 Members:
71
71
72 missing is a list of all nodes present in local but not in remote.
72 missing is a list of all nodes present in local but not in remote.
73 common is a list of all nodes shared between the two repos.
73 common is a list of all nodes shared between the two repos.
74 excluded is the list of missing changeset that shouldn't be sent remotely.
74 excluded is the list of missing changeset that shouldn't be sent remotely.
75 missingheads is the list of heads of missing.
75 missingheads is the list of heads of missing.
76 commonheads is the list of heads of common.
76 commonheads is the list of heads of common.
77
77
78 The sets are computed on demand from the heads, unless provided upfront
78 The sets are computed on demand from the heads, unless provided upfront
79 by discovery.'''
79 by discovery.'''
80
80
81 def __init__(self, repo, commonheads=None, missingheads=None,
81 def __init__(self, repo, commonheads=None, missingheads=None,
82 missingroots=None):
82 missingroots=None):
83 # at least one of them must not be set
83 # at least one of them must not be set
84 assert None in (commonheads, missingroots)
84 assert None in (commonheads, missingroots)
85 cl = repo.changelog
85 cl = repo.changelog
86 if missingheads is None:
86 if missingheads is None:
87 missingheads = cl.heads()
87 missingheads = cl.heads()
88 if missingroots:
88 if missingroots:
89 discbases = []
89 discbases = []
90 for n in missingroots:
90 for n in missingroots:
91 discbases.extend([p for p in cl.parents(n) if p != nullid])
91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 # TODO remove call to nodesbetween.
92 # TODO remove call to nodesbetween.
93 # TODO populate attributes on outgoing instance instead of setting
93 # TODO populate attributes on outgoing instance instead of setting
94 # discbases.
94 # discbases.
95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 included = set(csets)
96 included = set(csets)
97 missingheads = heads
97 missingheads = heads
98 commonheads = [n for n in discbases if n not in included]
98 commonheads = [n for n in discbases if n not in included]
99 elif not commonheads:
99 elif not commonheads:
100 commonheads = [nullid]
100 commonheads = [nullid]
101 self.commonheads = commonheads
101 self.commonheads = commonheads
102 self.missingheads = missingheads
102 self.missingheads = missingheads
103 self._revlog = cl
103 self._revlog = cl
104 self._common = None
104 self._common = None
105 self._missing = None
105 self._missing = None
106 self.excluded = []
106 self.excluded = []
107
107
108 def _computecommonmissing(self):
108 def _computecommonmissing(self):
109 sets = self._revlog.findcommonmissing(self.commonheads,
109 sets = self._revlog.findcommonmissing(self.commonheads,
110 self.missingheads)
110 self.missingheads)
111 self._common, self._missing = sets
111 self._common, self._missing = sets
112
112
113 @util.propertycache
113 @util.propertycache
114 def common(self):
114 def common(self):
115 if self._common is None:
115 if self._common is None:
116 self._computecommonmissing()
116 self._computecommonmissing()
117 return self._common
117 return self._common
118
118
119 @util.propertycache
119 @util.propertycache
120 def missing(self):
120 def missing(self):
121 if self._missing is None:
121 if self._missing is None:
122 self._computecommonmissing()
122 self._computecommonmissing()
123 return self._missing
123 return self._missing
124
124
125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 commoninc=None, portable=False):
126 commoninc=None, portable=False):
127 '''Return an outgoing instance to identify the nodes present in repo but
127 '''Return an outgoing instance to identify the nodes present in repo but
128 not in other.
128 not in other.
129
129
130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 (inclusive) are included. If you already know the local repo's heads,
131 (inclusive) are included. If you already know the local repo's heads,
132 passing them in onlyheads is faster than letting them be recomputed here.
132 passing them in onlyheads is faster than letting them be recomputed here.
133
133
134 If commoninc is given, it must be the result of a prior call to
134 If commoninc is given, it must be the result of a prior call to
135 findcommonincoming(repo, other, force) to avoid recomputing it here.
135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136
136
137 If portable is given, compute more conservative common and missingheads,
137 If portable is given, compute more conservative common and missingheads,
138 to make bundles created from the instance more portable.'''
138 to make bundles created from the instance more portable.'''
139 # declare an empty outgoing object to be filled later
139 # declare an empty outgoing object to be filled later
140 og = outgoing(repo, None, None)
140 og = outgoing(repo, None, None)
141
141
142 # get common set if not provided
142 # get common set if not provided
143 if commoninc is None:
143 if commoninc is None:
144 commoninc = findcommonincoming(repo, other, force=force)
144 commoninc = findcommonincoming(repo, other, force=force)
145 og.commonheads, _any, _hds = commoninc
145 og.commonheads, _any, _hds = commoninc
146
146
147 # compute outgoing
147 # compute outgoing
148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 if not mayexclude:
149 if not mayexclude:
150 og.missingheads = onlyheads or repo.heads()
150 og.missingheads = onlyheads or repo.heads()
151 elif onlyheads is None:
151 elif onlyheads is None:
152 # use visible heads as it should be cached
152 # use visible heads as it should be cached
153 og.missingheads = repo.filtered("served").heads()
153 og.missingheads = repo.filtered("served").heads()
154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 else:
155 else:
156 # compute common, missing and exclude secret stuff
156 # compute common, missing and exclude secret stuff
157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 og._common, allmissing = sets
158 og._common, allmissing = sets
159 og._missing = missing = []
159 og._missing = missing = []
160 og.excluded = excluded = []
160 og.excluded = excluded = []
161 for node in allmissing:
161 for node in allmissing:
162 ctx = repo[node]
162 ctx = repo[node]
163 if ctx.phase() >= phases.secret or ctx.extinct():
163 if ctx.phase() >= phases.secret or ctx.extinct():
164 excluded.append(node)
164 excluded.append(node)
165 else:
165 else:
166 missing.append(node)
166 missing.append(node)
167 if len(missing) == len(allmissing):
167 if len(missing) == len(allmissing):
168 missingheads = onlyheads
168 missingheads = onlyheads
169 else: # update missing heads
169 else: # update missing heads
170 missingheads = phases.newheads(repo, onlyheads, excluded)
170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 og.missingheads = missingheads
171 og.missingheads = missingheads
172 if portable:
172 if portable:
173 # recompute common and missingheads as if -r<rev> had been given for
173 # recompute common and missingheads as if -r<rev> had been given for
174 # each head of missing, and --base <rev> for each head of the proper
174 # each head of missing, and --base <rev> for each head of the proper
175 # ancestors of missing
175 # ancestors of missing
176 og._computecommonmissing()
176 og._computecommonmissing()
177 cl = repo.changelog
177 cl = repo.changelog
178 missingrevs = set(cl.rev(n) for n in og._missing)
178 missingrevs = set(cl.rev(n) for n in og._missing)
179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 commonheads = set(og.commonheads)
180 commonheads = set(og.commonheads)
181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182
182
183 return og
183 return og
184
184
185 def _headssummary(pushop):
185 def _headssummary(pushop):
186 """compute a summary of branch and heads status before and after push
186 """compute a summary of branch and heads status before and after push
187
187
188 return {'branch': ([remoteheads], [newheads],
188 return {'branch': ([remoteheads], [newheads],
189 [unsyncedheads], [discardedheads])} mapping
189 [unsyncedheads], [discardedheads])} mapping
190
190
191 - branch: the branch name,
191 - branch: the branch name,
192 - remoteheads: the list of remote heads known locally
192 - remoteheads: the list of remote heads known locally
193 None if the branch is new,
193 None if the branch is new,
194 - newheads: the new remote heads (known locally) with outgoing pushed,
194 - newheads: the new remote heads (known locally) with outgoing pushed,
195 - unsyncedheads: the list of remote heads unknown locally,
195 - unsyncedheads: the list of remote heads unknown locally,
196 - discardedheads: the list of heads made obsolete by the push.
196 - discardedheads: the list of heads made obsolete by the push.
197 """
197 """
198 repo = pushop.repo.unfiltered()
198 repo = pushop.repo.unfiltered()
199 remote = pushop.remote
199 remote = pushop.remote
200 outgoing = pushop.outgoing
200 outgoing = pushop.outgoing
201 cl = repo.changelog
201 cl = repo.changelog
202 headssum = {}
202 headssum = {}
203 # A. Create set of branches involved in the push.
203 # A. Create set of branches involved in the push.
204 branches = set(repo[n].branch() for n in outgoing.missing)
204 branches = set(repo[n].branch() for n in outgoing.missing)
205 remotemap = remote.branchmap()
205 remotemap = remote.branchmap()
206 newbranches = branches - set(remotemap)
206 newbranches = branches - set(remotemap)
207 branches.difference_update(newbranches)
207 branches.difference_update(newbranches)
208
208
209 # A. register remote heads
209 # A. register remote heads
210 remotebranches = set()
210 remotebranches = set()
211 for branch, heads in remote.branchmap().iteritems():
211 for branch, heads in remote.branchmap().iteritems():
212 remotebranches.add(branch)
212 remotebranches.add(branch)
213 known = []
213 known = []
214 unsynced = []
214 unsynced = []
215 knownnode = cl.hasnode # do not use nodemap until it is filtered
215 knownnode = cl.hasnode # do not use nodemap until it is filtered
216 for h in heads:
216 for h in heads:
217 if knownnode(h):
217 if knownnode(h):
218 known.append(h)
218 known.append(h)
219 else:
219 else:
220 unsynced.append(h)
220 unsynced.append(h)
221 headssum[branch] = (known, list(known), unsynced)
221 headssum[branch] = (known, list(known), unsynced)
222 # B. add new branch data
222 # B. add new branch data
223 missingctx = list(repo[n] for n in outgoing.missing)
223 missingctx = list(repo[n] for n in outgoing.missing)
224 touchedbranches = set()
224 touchedbranches = set()
225 for ctx in missingctx:
225 for ctx in missingctx:
226 branch = ctx.branch()
226 branch = ctx.branch()
227 touchedbranches.add(branch)
227 touchedbranches.add(branch)
228 if branch not in headssum:
228 if branch not in headssum:
229 headssum[branch] = (None, [], [])
229 headssum[branch] = (None, [], [])
230
230
231 # C drop data about untouched branches:
231 # C drop data about untouched branches:
232 for branch in remotebranches - touchedbranches:
232 for branch in remotebranches - touchedbranches:
233 del headssum[branch]
233 del headssum[branch]
234
234
235 # D. Update newmap with outgoing changes.
235 # D. Update newmap with outgoing changes.
236 # This will possibly add new heads and remove existing ones.
236 # This will possibly add new heads and remove existing ones.
237 newmap = branchmap.branchcache((branch, heads[1])
237 newmap = branchmap.branchcache((branch, heads[1])
238 for branch, heads in headssum.iteritems()
238 for branch, heads in headssum.iteritems()
239 if heads[0] is not None)
239 if heads[0] is not None)
240 newmap.update(repo, (ctx.rev() for ctx in missingctx))
240 newmap.update(repo, (ctx.rev() for ctx in missingctx))
241 for branch, newheads in newmap.iteritems():
241 for branch, newheads in newmap.iteritems():
242 headssum[branch][1][:] = newheads
242 headssum[branch][1][:] = newheads
243 for branch, items in headssum.iteritems():
243 for branch, items in headssum.iteritems():
244 for l in items:
244 for l in items:
245 if l is not None:
245 if l is not None:
246 l.sort()
246 l.sort()
247 headssum[branch] = items + ([],)
247 headssum[branch] = items + ([],)
248
248
249 # If there are no obsstore, no post processing are needed.
249 # If there are no obsstore, no post processing are needed.
250 if repo.obsstore:
250 if repo.obsstore:
251 allmissing = set(outgoing.missing)
251 allmissing = set(outgoing.missing)
252 cctx = repo.set('%ld', outgoing.common)
252 cctx = repo.set('%ld', outgoing.common)
253 allfuturecommon = set(c.node() for c in cctx)
253 allfuturecommon = set(c.node() for c in cctx)
254 allfuturecommon.update(allmissing)
254 allfuturecommon.update(allmissing)
255 for branch, heads in sorted(headssum.iteritems()):
255 for branch, heads in sorted(headssum.iteritems()):
256 remoteheads, newheads, unsyncedheads, placeholder = heads
256 remoteheads, newheads, unsyncedheads, placeholder = heads
257 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
257 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
258 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
258 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
259 sorted(result[1]))
259 sorted(result[1]))
260 return headssum
260 return headssum
261
261
262 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
262 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
263 """Compute branchmapsummary for repo without branchmap support"""
263 """Compute branchmapsummary for repo without branchmap support"""
264
264
265 # 1-4b. old servers: Check for new topological heads.
265 # 1-4b. old servers: Check for new topological heads.
266 # Construct {old,new}map with branch = None (topological branch).
266 # Construct {old,new}map with branch = None (topological branch).
267 # (code based on update)
267 # (code based on update)
268 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
268 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
269 oldheads = sorted(h for h in remoteheads if knownnode(h))
269 oldheads = sorted(h for h in remoteheads if knownnode(h))
270 # all nodes in outgoing.missing are children of either:
270 # all nodes in outgoing.missing are children of either:
271 # - an element of oldheads
271 # - an element of oldheads
272 # - another element of outgoing.missing
272 # - another element of outgoing.missing
273 # - nullrev
273 # - nullrev
274 # This explains why the new head are very simple to compute.
274 # This explains why the new head are very simple to compute.
275 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
275 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
276 newheads = sorted(c.node() for c in r)
276 newheads = sorted(c.node() for c in r)
277 # set some unsynced head to issue the "unsynced changes" warning
277 # set some unsynced head to issue the "unsynced changes" warning
278 if inc:
278 if inc:
279 unsynced = [None]
279 unsynced = [None]
280 else:
280 else:
281 unsynced = []
281 unsynced = []
282 return {None: (oldheads, newheads, unsynced, [])}
282 return {None: (oldheads, newheads, unsynced, [])}
283
283
284 def _nowarnheads(pushop):
284 def _nowarnheads(pushop):
285 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
285 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
286 repo = pushop.repo.unfiltered()
286 repo = pushop.repo.unfiltered()
287 remote = pushop.remote
287 remote = pushop.remote
288 localbookmarks = repo._bookmarks
288 localbookmarks = repo._bookmarks
289 remotebookmarks = remote.listkeys('bookmarks')
289 remotebookmarks = remote.listkeys('bookmarks')
290 bookmarkedheads = set()
290 bookmarkedheads = set()
291
291
292 # internal config: bookmarks.pushing
292 # internal config: bookmarks.pushing
293 newbookmarks = [localbookmarks.expandname(b)
293 newbookmarks = [localbookmarks.expandname(b)
294 for b in pushop.ui.configlist('bookmarks', 'pushing')]
294 for b in pushop.ui.configlist('bookmarks', 'pushing')]
295
295
296 for bm in localbookmarks:
296 for bm in localbookmarks:
297 rnode = remotebookmarks.get(bm)
297 rnode = remotebookmarks.get(bm)
298 if rnode and rnode in repo:
298 if rnode and rnode in repo:
299 lctx, rctx = repo[bm], repo[rnode]
299 lctx, rctx = repo[bm], repo[rnode]
300 if bookmarks.validdest(repo, rctx, lctx):
300 if bookmarks.validdest(repo, rctx, lctx):
301 bookmarkedheads.add(lctx.node())
301 bookmarkedheads.add(lctx.node())
302 else:
302 else:
303 if bm in newbookmarks and bm not in remotebookmarks:
303 if bm in newbookmarks and bm not in remotebookmarks:
304 bookmarkedheads.add(repo[bm].node())
304 bookmarkedheads.add(repo[bm].node())
305
305
306 return bookmarkedheads
306 return bookmarkedheads
307
307
308 def checkheads(pushop):
308 def checkheads(pushop):
309 """Check that a push won't add any outgoing head
309 """Check that a push won't add any outgoing head
310
310
311 raise Abort error and display ui message as needed.
311 raise Abort error and display ui message as needed.
312 """
312 """
313
313
314 repo = pushop.repo.unfiltered()
314 repo = pushop.repo.unfiltered()
315 remote = pushop.remote
315 remote = pushop.remote
316 outgoing = pushop.outgoing
316 outgoing = pushop.outgoing
317 remoteheads = pushop.remoteheads
317 remoteheads = pushop.remoteheads
318 newbranch = pushop.newbranch
318 newbranch = pushop.newbranch
319 inc = bool(pushop.incoming)
319 inc = bool(pushop.incoming)
320
320
321 # Check for each named branch if we're creating new remote heads.
321 # Check for each named branch if we're creating new remote heads.
322 # To be a remote head after push, node must be either:
322 # To be a remote head after push, node must be either:
323 # - unknown locally
323 # - unknown locally
324 # - a local outgoing head descended from update
324 # - a local outgoing head descended from update
325 # - a remote head that's known locally and not
325 # - a remote head that's known locally and not
326 # ancestral to an outgoing head
326 # ancestral to an outgoing head
327 if remoteheads == [nullid]:
327 if remoteheads == [nullid]:
328 # remote is empty, nothing to check.
328 # remote is empty, nothing to check.
329 return
329 return
330
330
331 if remote.capable('branchmap'):
331 if remote.capable('branchmap'):
332 headssum = _headssummary(pushop)
332 headssum = _headssummary(pushop)
333 else:
333 else:
334 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
334 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
335 pushop.pushbranchmap = headssum
335 pushop.pushbranchmap = headssum
336 newbranches = [branch for branch, heads in headssum.iteritems()
336 newbranches = [branch for branch, heads in headssum.iteritems()
337 if heads[0] is None]
337 if heads[0] is None]
338 # 1. Check for new branches on the remote.
338 # 1. Check for new branches on the remote.
339 if newbranches and not newbranch: # new branch requires --new-branch
339 if newbranches and not newbranch: # new branch requires --new-branch
340 branchnames = ', '.join(sorted(newbranches))
340 branchnames = ', '.join(sorted(newbranches))
341 raise error.Abort(_("push creates new remote branches: %s!")
341 raise error.Abort(_("push creates new remote branches: %s!")
342 % branchnames,
342 % branchnames,
343 hint=_("use 'hg push --new-branch' to create"
343 hint=_("use 'hg push --new-branch' to create"
344 " new remote branches"))
344 " new remote branches"))
345
345
346 # 2. Find heads that we need not warn about
346 # 2. Find heads that we need not warn about
347 nowarnheads = _nowarnheads(pushop)
347 nowarnheads = _nowarnheads(pushop)
348
348
349 # 3. Check for new heads.
349 # 3. Check for new heads.
350 # If there are more heads after the push than before, a suitable
350 # If there are more heads after the push than before, a suitable
351 # error message, depending on unsynced status, is displayed.
351 # error message, depending on unsynced status, is displayed.
352 errormsg = None
352 errormsg = None
353 for branch, heads in sorted(headssum.iteritems()):
353 for branch, heads in sorted(headssum.iteritems()):
354 remoteheads, newheads, unsyncedheads, discardedheads = heads
354 remoteheads, newheads, unsyncedheads, discardedheads = heads
355 # add unsynced data
355 # add unsynced data
356 if remoteheads is None:
356 if remoteheads is None:
357 oldhs = set()
357 oldhs = set()
358 else:
358 else:
359 oldhs = set(remoteheads)
359 oldhs = set(remoteheads)
360 oldhs.update(unsyncedheads)
360 oldhs.update(unsyncedheads)
361 dhs = None # delta heads, the new heads on branch
361 dhs = None # delta heads, the new heads on branch
362 newhs = set(newheads)
362 newhs = set(newheads)
363 newhs.update(unsyncedheads)
363 newhs.update(unsyncedheads)
364 if unsyncedheads:
364 if unsyncedheads:
365 if None in unsyncedheads:
365 if None in unsyncedheads:
366 # old remote, no heads data
366 # old remote, no heads data
367 heads = None
367 heads = None
368 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
368 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
369 heads = ' '.join(short(h) for h in unsyncedheads)
369 heads = ' '.join(short(h) for h in unsyncedheads)
370 else:
370 else:
371 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
371 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
372 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
372 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
373 if heads is None:
373 if heads is None:
374 repo.ui.status(_("remote has heads that are "
374 repo.ui.status(_("remote has heads that are "
375 "not known locally\n"))
375 "not known locally\n"))
376 elif branch is None:
376 elif branch is None:
377 repo.ui.status(_("remote has heads that are "
377 repo.ui.status(_("remote has heads that are "
378 "not known locally: %s\n") % heads)
378 "not known locally: %s\n") % heads)
379 else:
379 else:
380 repo.ui.status(_("remote has heads on branch '%s' that are "
380 repo.ui.status(_("remote has heads on branch '%s' that are "
381 "not known locally: %s\n") % (branch, heads))
381 "not known locally: %s\n") % (branch, heads))
382 if remoteheads is None:
382 if remoteheads is None:
383 if len(newhs) > 1:
383 if len(newhs) > 1:
384 dhs = list(newhs)
384 dhs = list(newhs)
385 if errormsg is None:
385 if errormsg is None:
386 errormsg = (_("push creates new branch '%s' "
386 errormsg = (_("push creates new branch '%s' "
387 "with multiple heads") % (branch))
387 "with multiple heads") % (branch))
388 hint = _("merge or"
388 hint = _("merge or"
389 " see 'hg help push' for details about"
389 " see 'hg help push' for details about"
390 " pushing new heads")
390 " pushing new heads")
391 elif len(newhs) > len(oldhs):
391 elif len(newhs) > len(oldhs):
392 # remove bookmarked or existing remote heads from the new heads list
392 # remove bookmarked or existing remote heads from the new heads list
393 dhs = sorted(newhs - nowarnheads - oldhs)
393 dhs = sorted(newhs - nowarnheads - oldhs)
394 if dhs:
394 if dhs:
395 if errormsg is None:
395 if errormsg is None:
396 if branch not in ('default', None):
396 if branch not in ('default', None):
397 errormsg = _("push creates new remote head %s "
397 errormsg = _("push creates new remote head %s "
398 "on branch '%s'!") % (short(dhs[0]), branch)
398 "on branch '%s'!") % (short(dhs[0]), branch)
399 elif repo[dhs[0]].bookmarks():
399 elif repo[dhs[0]].bookmarks():
400 errormsg = _("push creates new remote head %s "
400 errormsg = _("push creates new remote head %s "
401 "with bookmark '%s'!") % (
401 "with bookmark '%s'!") % (
402 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
402 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
403 else:
403 else:
404 errormsg = _("push creates new remote head %s!"
404 errormsg = _("push creates new remote head %s!"
405 ) % short(dhs[0])
405 ) % short(dhs[0])
406 if unsyncedheads:
406 if unsyncedheads:
407 hint = _("pull and merge or"
407 hint = _("pull and merge or"
408 " see 'hg help push' for details about"
408 " see 'hg help push' for details about"
409 " pushing new heads")
409 " pushing new heads")
410 else:
410 else:
411 hint = _("merge or"
411 hint = _("merge or"
412 " see 'hg help push' for details about"
412 " see 'hg help push' for details about"
413 " pushing new heads")
413 " pushing new heads")
414 if branch is None:
414 if branch is None:
415 repo.ui.note(_("new remote heads:\n"))
415 repo.ui.note(_("new remote heads:\n"))
416 else:
416 else:
417 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
417 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
418 for h in dhs:
418 for h in dhs:
419 repo.ui.note((" %s\n") % short(h))
419 repo.ui.note((" %s\n") % short(h))
420 if errormsg:
420 if errormsg:
421 raise error.Abort(errormsg, hint=hint)
421 raise error.Abort(errormsg, hint=hint)
422
422
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    :pushop: the push operation object (provides ``repo`` and ``ui``)
    :futurecommon: container of nodes that will be common after the push
    :candidate_newhs: iterable of candidate new head nodes on the remote

    Returns (newheads, discarded_heads) tuple: both are sets of nodes;
    ``discarded_heads`` are candidate heads whose whole exclusive branch is
    obsoleted by markers being pushed.
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    # use nodemap.get (not changelog.rev) so an unknown node yields None
    # instead of raising
    torev = unfi.changelog.nodemap.get
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = (lambda r: getphase(unfi, r) == public)
    ispushed = (lambda n: n in futurecommon)
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set() # final set of new heads
    discarded = set() # new head of fully replaced branch

    localcandidate = set() # candidate heads known locally
    unknownheads = set() # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                # we hold a marker about a head we cannot see: warn, since we
                # silently skip obsolescence processing for unknown heads
                msg = ('checkheads: remote head unknown locally has'
                       ' local marker: %s\n')
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        # run this check early to skip the evaluation of the whole branch
        if (nh in futurecommon or ispublic(torev(nh))):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                               nh, localcandidate, newhs)
        branchnodes = [tonode(r) for r in branchrevs]

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (any(ispublic(r) for r in branchrevs)
                or any(n in futurecommon for n in branchnodes)
                or any(not hasoutmarker(n) for n in branchnodes)):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads. However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
498
498
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    successorsmarkers = obsstore.successors
    # depth-first walk over the successor graph starting from ``node``
    seen = {node}
    pending = [node]
    while pending:
        current = pending.pop()
        if ispushed(current):
            return True
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in successorsmarkers.get(current, ()):
            # a prune marker has no successors; fall back to its parents
            targets = marker[1] or marker[5] or ()
            for nxt in targets:
                if nxt not in seen:
                    seen.add(nxt)
                    pending.append(nxt)
    return False
General Comments 0
You need to be logged in to leave comments. Login now