discovery: don't reimplement all()...
Martin von Zweigbergk
r35897:6c1d3779 default
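This revision swaps a hand-rolled "flag plus early break" membership loop over `heads` for the built-in `all()`, which short-circuits at the first unknown node exactly as the `break` did, so behavior is unchanged. A minimal standalone sketch of the equivalence (the `allknown_before`/`allknown_after` wrappers and the stub `knownnode` predicate are illustrative stand-ins, not part of the patch):

```python
def allknown_before(heads, knownnode):
    # old shape: manual flag, early break on the first unknown head
    allknown = True
    for h in heads:
        if not knownnode(h):
            allknown = False
            break
    return allknown

def allknown_after(heads, knownnode):
    # new shape: all() consumes the generator lazily and stops at the
    # first falsy result, preserving the short-circuit behavior
    return all(knownnode(h) for h in heads)

# stub predicate standing in for repo.changelog.hasnode
knownnode = {'n1', 'n2'}.__contains__
for heads in (['n1', 'n2'], ['n1', 'n3'], []):
    assert allknown_before(heads, knownnode) == allknown_after(heads, knownnode)
```

Both forms report an empty `heads` as "all known", but the call site guards with `if heads:` first, so that edge case never arises in the patched code.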
@@ -1,530 +1,525 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import functools
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 
 from . import (
     bookmarks,
     branchmap,
     error,
     phases,
     scmutil,
     setdiscovery,
     treediscovery,
     util,
 )
 
 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.
 
     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.
     "ancestorsof" if not None, restrict the discovery to a subset defined by
     these nodes. Changeset outside of this set won't be considered (and
     won't appears in "common")
 
     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".
 
     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """
 
     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)
 
     if heads:
-        allknown = True
         knownnode = repo.changelog.hasnode # no nodemap until it is filtered
-        for h in heads:
-            if not knownnode(h):
-                allknown = False
-                break
-        if allknown:
+        if all(knownnode(h) for h in heads):
             return (heads, False, heads)
 
     res = setdiscovery.findcommonheads(repo.ui, repo, remote, heads,
                                        abortwhenunrelated=not force,
                                        ancestorsof=ancestorsof)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))
 
 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.
 
     Members:
 
     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changeset that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.
 
     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''
 
     def __init__(self, repo, commonheads=None, missingheads=None,
                  missingroots=None):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
         if missingheads is None:
             missingheads = cl.heads()
         if missingroots:
             discbases = []
             for n in missingroots:
                 discbases.extend([p for p in cl.parents(n) if p != nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
             csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
             included = set(csets)
             missingheads = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
             commonheads = [nullid]
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = cl
         self._common = None
         self._missing = None
         self.excluded = []
 
     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets
 
     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common
 
     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing
 
 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.
 
     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.
 
     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.
 
     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)
 
     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force,
                                        ancestorsof=onlyheads)
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
     mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = repo.filtered("served").heads()
         og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else: # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]
 
     return og
 
 def _headssummary(pushop):
     """compute a summary of branch and heads status before and after push
 
     return {'branch': ([remoteheads], [newheads],
                        [unsyncedheads], [discardedheads])} mapping
 
     - branch: the branch name,
     - remoteheads: the list of remote heads known locally
                    None if the branch is new,
     - newheads: the new remote heads (known locally) with outgoing pushed,
     - unsyncedheads: the list of remote heads unknown locally,
     - discardedheads: the list of heads made obsolete by the push.
     """
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     cl = repo.changelog
     headssum = {}
     # A. Create set of branches involved in the push.
     branches = set(repo[n].branch() for n in outgoing.missing)
     remotemap = remote.branchmap()
     newbranches = branches - set(remotemap)
     branches.difference_update(newbranches)
 
     # A. register remote heads
     remotebranches = set()
     for branch, heads in remote.branchmap().iteritems():
         remotebranches.add(branch)
         known = []
         unsynced = []
         knownnode = cl.hasnode # do not use nodemap until it is filtered
         for h in heads:
             if knownnode(h):
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)
     # B. add new branch data
     missingctx = list(repo[n] for n in outgoing.missing)
     touchedbranches = set()
     for ctx in missingctx:
         branch = ctx.branch()
         touchedbranches.add(branch)
         if branch not in headssum:
             headssum[branch] = (None, [], [])
 
     # C drop data about untouched branches:
     for branch in remotebranches - touchedbranches:
         del headssum[branch]
 
     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.branchcache((branch, heads[1])
                                    for branch, heads in headssum.iteritems()
                                    if heads[0] is not None)
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     for branch, items in headssum.iteritems():
         for l in items:
             if l is not None:
                 l.sort()
         headssum[branch] = items + ([],)
 
     # If there are no obsstore, no post processing are needed.
     if repo.obsstore:
         torev = repo.changelog.rev
         futureheads = set(torev(h) for h in outgoing.missingheads)
         futureheads |= set(torev(h) for h in outgoing.commonheads)
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(headssum.iteritems()):
             remoteheads, newheads, unsyncedheads, placeholder = heads
             result = _postprocessobsolete(pushop, allfuturecommon, newheads)
             headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                 sorted(result[1]))
     return headssum
 
 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""
 
     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
     knownnode = repo.changelog.hasnode # no nodemap until it is filtered
     oldheads = sorted(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new head are very simple to compute.
     r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = sorted(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
         unsynced = [None]
     else:
         unsynced = []
     return {None: (oldheads, newheads, unsynced, [])}
 
 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     localbookmarks = repo._bookmarks
     remotebookmarks = remote.listkeys('bookmarks')
     bookmarkedheads = set()
 
     # internal config: bookmarks.pushing
     newbookmarks = [localbookmarks.expandname(b)
                     for b in pushop.ui.configlist('bookmarks', 'pushing')]
 
     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = repo[bm], repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
             if bm in newbookmarks and bm not in remotebookmarks:
                 bookmarkedheads.add(repo[bm].node())
 
     return bookmarkedheads
 
 def checkheads(pushop):
     """Check that a push won't add any outgoing head
 
     raise Abort error and display ui message as needed.
     """
 
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     remoteheads = pushop.remoteheads
     newbranch = pushop.newbranch
     inc = bool(pushop.incoming)
 
     # Check for each named branch if we're creating new remote heads.
     # To be a remote head after push, node must be either:
     # - unknown locally
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return
 
     if remote.capable('branchmap'):
         headssum = _headssummary(pushop)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     pushop.pushbranchmap = headssum
     newbranches = [branch for branch, heads in headssum.iteritems()
                    if heads[0] is None]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch: # new branch requires --new-branch
         branchnames = ', '.join(sorted(newbranches))
         raise error.Abort(_("push creates new remote branches: %s!")
                           % branchnames,
                           hint=_("use 'hg push --new-branch' to create"
                                  " new remote branches"))
 
     # 2. Find heads that we need not warn about
     nowarnheads = _nowarnheads(pushop)
 
     # 3. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     errormsg = None
     for branch, heads in sorted(headssum.iteritems()):
         remoteheads, newheads, unsyncedheads, discardedheads = heads
         # add unsynced data
         if remoteheads is None:
             oldhs = set()
         else:
             oldhs = set(remoteheads)
         oldhs.update(unsyncedheads)
         dhs = None # delta heads, the new heads on branch
         newhs = set(newheads)
         newhs.update(unsyncedheads)
         if unsyncedheads:
             if None in unsyncedheads:
                 # old remote, no heads data
                 heads = None
             else:
                 heads = scmutil.nodesummaries(repo, unsyncedheads)
             if heads is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally\n"))
             elif branch is None:
                 repo.ui.status(_("remote has heads that are "
                                  "not known locally: %s\n") % heads)
             else:
                 repo.ui.status(_("remote has heads on branch '%s' that are "
                                  "not known locally: %s\n") % (branch, heads))
         if remoteheads is None:
             if len(newhs) > 1:
                 dhs = list(newhs)
                 if errormsg is None:
                     errormsg = (_("push creates new branch '%s' "
                                   "with multiple heads") % (branch))
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
         elif len(newhs) > len(oldhs):
             # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - nowarnheads - oldhs)
         if dhs:
             if errormsg is None:
                 if branch not in ('default', None):
                     errormsg = _("push creates new remote head %s "
                                  "on branch '%s'!") % (short(dhs[0]), branch)
                 elif repo[dhs[0]].bookmarks():
                     errormsg = _("push creates new remote head %s "
                                  "with bookmark '%s'!") % (
                                  short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                 else:
                     errormsg = _("push creates new remote head %s!"
                                  ) % short(dhs[0])
                 if unsyncedheads:
                     hint = _("pull and merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
                 else:
                     hint = _("merge or"
                              " see 'hg help push' for details about"
                              " pushing new heads")
             if branch is None:
                 repo.ui.note(_("new remote heads:\n"))
             else:
                 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
             for h in dhs:
                 repo.ui.note((" %s\n") % short(h))
     if errormsg:
         raise error.Abort(errormsg, hint=hint)
 
 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
     """post process the list of new heads with obsolescence information
 
     Exists as a sub-function to contain the complexity and allow extensions to
     experiment with smarter logic.
 
     Returns (newheads, discarded_heads) tuple
     """
     # known issue
     #
     # * We "silently" skip processing on all changeset unknown locally
     #
     # * if <nh> is public on the remote, it won't be affected by obsolete
     #     marker and a new is created
 
     # define various utilities and containers
     repo = pushop.repo
     unfi = repo.unfiltered()
     tonode = unfi.changelog.node
     torev = unfi.changelog.nodemap.get
     public = phases.public
     getphase = unfi._phasecache.phase
     ispublic = (lambda r: getphase(unfi, r) == public)
     ispushed = (lambda n: torev(n) in futurecommon)
     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
     successorsmarkers = unfi.obsstore.successors
     newhs = set() # final set of new heads
     discarded = set() # new head of fully replaced branch
 
     localcandidate = set() # candidate heads known locally
     unknownheads = set() # candidate heads unknown locally
     for h in candidate_newhs:
         if h in unfi:
             localcandidate.add(h)
         else:
             if successorsmarkers.get(h) is not None:
                 msg = ('checkheads: remote head unknown locally has'
                        ' local marker: %s\n')
                 repo.ui.debug(msg % hex(h))
             unknownheads.add(h)
 
     # fast path the simple case
     if len(localcandidate) == 1:
         return unknownheads | set(candidate_newhs), set()
 
     # actually process branch replacement
     while localcandidate:
         nh = localcandidate.pop()
         # run this check early to skip the evaluation of the whole branch
         if (torev(nh) in futurecommon or ispublic(torev(nh))):
             newhs.add(nh)
             continue
 
         # Get all revs/nodes on the branch exclusive to this head
         # (already filtered heads are "ignored"))
         branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                                nh, localcandidate, newhs)
         branchnodes = [tonode(r) for r in branchrevs]
 
         # The branch won't be hidden on the remote if
         # * any part of it is public,
         # * any part of it is considered part of the result by previous logic,
         # * if we have no markers to push to obsolete it.
         if (any(ispublic(r) for r in branchrevs)
                 or any(torev(n) in futurecommon for n in branchnodes)
                 or any(not hasoutmarker(n) for n in branchnodes)):
             newhs.add(nh)
         else:
             # note: there is a corner case if there is a merge in the branch.
             # we might end up with -more- heads. However, these heads are not
             # "added" by the push, but more by the "removal" on the remote so I
             # think is a okay to ignore them,
             discarded.add(nh)
     newhs |= unknownheads
     return newhs, discarded
 
 def pushingmarkerfor(obsstore, ispushed, node):
     """true if some markers are to be pushed for node
 
     We cannot just look in to the pushed obsmarkers from the pushop because
     discovery might have filtered relevant markers. In addition listing all
     markers relevant to all changesets in the pushed set would be too expensive
     (O(len(repo)))
 
     (note: There are cache opportunity in this function. but it would requires
     a two dimensional stack.)
     """
     successorsmarkers = obsstore.successors
     stack = [node]
     seen = set(stack)
     while stack:
         current = stack.pop()
         if ispushed(current):
             return True
         markers = successorsmarkers.get(current, ())
         # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
         for m in markers:
             nexts = m[1] # successors
             if not nexts: # this is a prune marker
                 nexts = m[5] or () # parents
             for n in nexts:
                 if n not in seen:
                     seen.add(n)
                     stack.append(n)
     return False