checkheads: extract branchmap preprocessing...
Pierre-Yves David
r17209:5cd3e526 default
@@ -1,306 +1,325 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases
11 11
12 12 def findcommonincoming(repo, remote, heads=None, force=False):
13 13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 14 subset of nodes between repo and remote.
15 15
16 16 "common" is a list of (at least) the heads of the common subset.
17 17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 18 locally. If remote does not support getbundle, this actually is a list of
19 19 roots of the nodes that would be incoming, to be supplied to
20 20 changegroupsubset. No code except for pull should be relying on this fact
21 21 any longer.
22 22 "heads" is either the supplied heads, or else the remote's heads.
23 23
24 24 If you pass heads and they are all known locally, the response lists just
25 25 these heads in "common" and in "heads".
26 26
27 27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 28 extensions a good hook into outgoing.
29 29 """
30 30
31 31 if not remote.capable('getbundle'):
32 32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 33
34 34 if heads:
35 35 allknown = True
36 36 nm = repo.changelog.nodemap
37 37 for h in heads:
38 38 if nm.get(h) is None:
39 39 allknown = False
40 40 break
41 41 if allknown:
42 42 return (heads, False, heads)
43 43
44 44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 45 abortwhenunrelated=not force)
46 46 common, anyinc, srvheads = res
47 47 return (list(common), anyinc, heads or list(srvheads))
48 48
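The early return above hinges on every supplied head already being present in the local nodemap. A minimal standalone sketch of that check, using a plain dict in place of repo.changelog.nodemap (allheadsknown and the node strings are illustrative, not Mercurial API):

    # Illustrative stand-in for the nodemap lookup in findcommonincoming.
    def allheadsknown(nodemap, heads):
        """Return True when every requested head already exists locally."""
        return all(nodemap.get(h) is not None for h in heads)

    nodemap = {'a1b2': 0, 'c3d4': 1}                  # node -> local revision number
    print(allheadsknown(nodemap, ['a1b2']))           # True  -> return (heads, False, heads)
    print(allheadsknown(nodemap, ['a1b2', 'ffff']))   # False -> fall through to setdiscovery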
49 49 class outgoing(object):
50 50 '''Represents the set of nodes present in a local repo but not in a
51 51 (possibly) remote one.
52 52
53 53 Members:
54 54
55 55 missing is a list of all nodes present in local but not in remote.
56 56 common is a list of all nodes shared between the two repos.
57 57 excluded is the list of missing changesets that shouldn't be sent remotely.
58 58 missingheads is the list of heads of missing.
59 59 commonheads is the list of heads of common.
60 60
61 61 The sets are computed on demand from the heads, unless provided upfront
62 62 by discovery.'''
63 63
64 64 def __init__(self, revlog, commonheads, missingheads):
65 65 self.commonheads = commonheads
66 66 self.missingheads = missingheads
67 67 self._revlog = revlog
68 68 self._common = None
69 69 self._missing = None
70 70 self.excluded = []
71 71
72 72 def _computecommonmissing(self):
73 73 sets = self._revlog.findcommonmissing(self.commonheads,
74 74 self.missingheads)
75 75 self._common, self._missing = sets
76 76
77 77 @util.propertycache
78 78 def common(self):
79 79 if self._common is None:
80 80 self._computecommonmissing()
81 81 return self._common
82 82
83 83 @util.propertycache
84 84 def missing(self):
85 85 if self._missing is None:
86 86 self._computecommonmissing()
87 87 return self._missing
88 88
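The common and missing members above are filled in on first access and then cached. A rough standalone sketch of that on-demand pattern using plain properties (the real class relies on util.propertycache and revlog.findcommonmissing, which this sketch only fakes):

    class lazypair(object):
        """Compute two related sets once, on first access (illustrative only)."""
        def __init__(self):
            self._common = None
            self._missing = None

        def _compute(self):
            # discovery.py would call revlog.findcommonmissing() here.
            self._common, self._missing = set(['a']), set(['b', 'c'])

        @property
        def common(self):
            if self._common is None:
                self._compute()
            return self._common

        @property
        def missing(self):
            if self._missing is None:
                self._compute()
            return self._missing

    p = lazypair()
    p.common; p.missing   # both sets come from a single _compute() call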
89 89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 90 commoninc=None, portable=False):
91 91 '''Return an outgoing instance to identify the nodes present in repo but
92 92 not in other.
93 93
94 94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 95 (inclusive) are included. If you already know the local repo's heads,
96 96 passing them in onlyheads is faster than letting them be recomputed here.
97 97
98 98 If commoninc is given, it must be the result of a prior call to
99 99 findcommonincoming(repo, other, force) to avoid recomputing it here.
100 100
101 101 If portable is given, compute more conservative common and missingheads,
102 102 to make bundles created from the instance more portable.'''
103 103 # declare an empty outgoing object to be filled later
104 104 og = outgoing(repo.changelog, None, None)
105 105
106 106 # get common set if not provided
107 107 if commoninc is None:
108 108 commoninc = findcommonincoming(repo, other, force=force)
109 109 og.commonheads, _any, _hds = commoninc
110 110
111 111 # compute outgoing
112 112 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
113 113 if not mayexclude:
114 114 og.missingheads = onlyheads or repo.heads()
115 115 elif onlyheads is None:
116 116 # use visible heads as it should be cached
117 117 og.missingheads = visibleheads(repo)
118 118 # extinct changesets are silently ignored
119 119 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
120 120 else:
121 121 # compute common, missing and exclude secret stuff
122 122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
123 123 og._common, allmissing = sets
124 124 og._missing = missing = []
125 125 og.excluded = excluded = []
126 126 for node in allmissing:
127 127 ctx = repo[node]
128 128 if not ctx.extinct():
129 129 # extinct changesets are silently ignored
130 130 if ctx.phase() >= phases.secret:
131 131 excluded.append(node)
132 132 else:
133 133 missing.append(node)
134 134 if len(missing) == len(allmissing):
135 135 missingheads = onlyheads
136 136 else: # update missing heads
137 137 missingheads = phases.newheads(repo, onlyheads, excluded)
138 138 og.missingheads = missingheads
139 139 if portable:
140 140 # recompute common and missingheads as if -r<rev> had been given for
141 141 # each head of missing, and --base <rev> for each head of the proper
142 142 # ancestors of missing
143 143 og._computecommonmissing()
144 144 cl = repo.changelog
145 145 missingrevs = set(cl.rev(n) for n in og._missing)
146 146 og._common = set(cl.ancestors(missingrevs)) - missingrevs
147 147 commonheads = set(og.commonheads)
148 148 og.missingheads = [h for h in og.missingheads if h not in commonheads]
149 149
150 150 return og
151 151
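With portable=True, common is recomputed as the ancestors of the missing revisions minus those revisions themselves, and any missing head that is also a common head is dropped. A toy sketch of that set arithmetic on plain revision numbers (the parent table and ancestors helper are made up for illustration):

    # Toy DAG: rev -> parents. Revisions 3 and 4 are the "missing" ones.
    parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [2]}

    def ancestors(revs, parents):
        """Collect all ancestors of revs (not including revs themselves)."""
        seen, stack = set(), list(revs)
        while stack:
            for p in parents[stack.pop()]:
                if p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen

    missingrevs = set([3, 4])
    common = ancestors(missingrevs, parents) - missingrevs
    print(common)   # -> {0, 1, 2}, the conservative "portable" common set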
152 def _branchmapsummary(repo, remote, outgoing):
153 """compute a summary of branch and heads status before and after push
154
155 - oldmap: {'branch': [heads]} mapping for remote
156 - newmap: {'branch': [heads]} mapping for local
157 - unsynced: set of branches that have unsynced remote changes
158 - branches: set of all common branches pushed
159 - newbranches: list of plain new pushed branches
160 """
161 cl = repo.changelog
162
163 # A. Create set of branches involved in the push.
164 branches = set(repo[n].branch() for n in outgoing.missing)
165 remotemap = remote.branchmap()
166 newbranches = branches - set(remotemap)
167 branches.difference_update(newbranches)
168
169 # B. Construct the initial oldmap and newmap dicts.
170 # They contain information about the remote heads before and
171 # after the push, respectively.
172 # Heads not found locally are not included in either dict,
173 # since they won't be affected by the push.
174 # unsynced contains all branches with incoming changesets.
175 oldmap = {}
176 newmap = {}
177 unsynced = set()
178 for branch in branches:
179 remotebrheads = remotemap[branch]
180
181 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
182 oldmap[branch] = prunedbrheads
183 newmap[branch] = list(prunedbrheads)
184 if len(remotebrheads) > len(prunedbrheads):
185 unsynced.add(branch)
186
187 # C. Update newmap with outgoing changes.
188 # This will possibly add new heads and remove existing ones.
189 ctxgen = (repo[n] for n in outgoing.missing)
190 repo._updatebranchcache(newmap, ctxgen)
191 return oldmap, newmap, unsynced, branches, newbranches
192
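The summary returned above is plain data: two branch-to-heads mappings plus three collections describing how the push relates to the remote. A made-up example of what a caller receives (node names are placeholders, not real hashes):

    # Hypothetical _branchmapsummary result, for illustration only.
    oldmap      = {'default': ['n1'], 'stable': ['n7']}        # remote heads before the push
    newmap      = {'default': ['n2', 'n3'], 'stable': ['n7']}  # heads after applying outgoing
    unsynced    = set(['stable'])             # branches with remote changes not yet pulled
    branches    = set(['default', 'stable'])  # common branches touched by the push
    newbranches = set()                       # branches that do not exist on the remote yet

    # checkheads() later compares len(newmap[b]) against len(oldmap[b]) per branch.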
193 def _oldbranchmapsummary(repo, remoteheads, outgoing, inc=False):
194 """Compute branchmapsummary for repo without branchmap support"""
195
196 cl = repo.changelog
197 # 1-4b. old servers: Check for new topological heads.
198 # Construct {old,new}map with branch = None (topological branch).
199 # (code based on _updatebranchcache)
200 oldheads = set(h for h in remoteheads if h in cl.nodemap)
201 # all nodes in outgoing.missing are children of either:
202 # - an element of oldheads
203 # - another element of outgoing.missing
204 # - nullrev
205 # This explains why the new heads are very simple to compute.
206 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
207 branches = set([None])
208 newmap = {None: list(c.node() for c in r)}
209 oldmap = {None: oldheads}
210 unsynced = inc and branches or set()
211 return oldmap, newmap, unsynced, branches, set()
212
152 213 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
153 214 """Check that a push won't add any outgoing head
154 215
155 216 raise an Abort error and display a ui message as needed.
156 217 """
218 # Check for each named branch if we're creating new remote heads.
219 # To be a remote head after push, node must be either:
220 # - unknown locally
221 # - a local outgoing head descended from update
222 # - a remote head that's known locally and not
223 # ancestral to an outgoing head
157 224 if remoteheads == [nullid]:
158 225 # remote is empty, nothing to check.
159 226 return
160 227
161 cl = repo.changelog
162 228 if remote.capable('branchmap'):
163 # Check for each named branch if we're creating new remote heads.
164 # To be a remote head after push, node must be either:
165 # - unknown locally
166 # - a local outgoing head descended from update
167 # - a remote head that's known locally and not
168 # ancestral to an outgoing head
169
170 # 1. Create set of branches involved in the push.
171 branches = set(repo[n].branch() for n in outgoing.missing)
172
173 # 2. Check for new branches on the remote.
174 remotemap = remote.branchmap()
175 newbranches = branches - set(remotemap)
176 if newbranches and not newbranch: # new branch requires --new-branch
177 branchnames = ', '.join(sorted(newbranches))
178 raise util.Abort(_("push creates new remote branches: %s!")
179 % branchnames,
180 hint=_("use 'hg push --new-branch' to create"
181 " new remote branches"))
182 branches.difference_update(newbranches)
229 bms = _branchmapsummary(repo, remote, outgoing)
230 else:
231 bms = _oldbranchmapsummary(repo, remoteheads, outgoing, inc)
232 oldmap, newmap, unsynced, branches, newbranches = bms
233 # 1. Check for new branches on the remote.
234 if newbranches and not newbranch: # new branch requires --new-branch
235 branchnames = ', '.join(sorted(newbranches))
236 raise util.Abort(_("push creates new remote branches: %s!")
237 % branchnames,
238 hint=_("use 'hg push --new-branch' to create"
239 " new remote branches"))
183 240
184 # 3. Construct the initial oldmap and newmap dicts.
185 # They contain information about the remote heads before and
186 # after the push, respectively.
187 # Heads not found locally are not included in either dict,
188 # since they won't be affected by the push.
189 # unsynced contains all branches with incoming changesets.
190 oldmap = {}
191 newmap = {}
192 unsynced = set()
193 for branch in branches:
194 remotebrheads = remotemap[branch]
195 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
196 oldmap[branch] = prunedbrheads
197 newmap[branch] = list(prunedbrheads)
198 if len(remotebrheads) > len(prunedbrheads):
199 unsynced.add(branch)
200
201 # 4. Update newmap with outgoing changes.
202 # This will possibly add new heads and remove existing ones.
203 ctxgen = (repo[n] for n in outgoing.missing)
204 repo._updatebranchcache(newmap, ctxgen)
205
206 else:
207 # 1-4b. old servers: Check for new topological heads.
208 # Construct {old,new}map with branch = None (topological branch).
209 # (code based on _updatebranchcache)
210 oldheads = set(h for h in remoteheads if h in cl.nodemap)
211 # all nodes in outgoing.missing are children of either:
212 # - an element of oldheads
213 # - another element of outgoing.missing
214 # - nullrev
215 # This explains why the new heads are very simple to compute.
216 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
217 branches = set([None])
218 newmap = {None: list(c.node() for c in r)}
219 oldmap = {None: oldheads}
220 unsynced = inc and branches or set()
221
222 # 5. Check for new heads.
241 # 2. Check for new heads.
223 242 # If there are more heads after the push than before, a suitable
224 243 # error message, depending on unsynced status, is displayed.
225 244 error = None
226 245 localbookmarks = repo._bookmarks
227 246
228 247 for branch in branches:
229 248 newhs = set(newmap[branch])
230 249 oldhs = set(oldmap[branch])
231 250 dhs = None
232 251 if len(newhs) > len(oldhs):
233 252 # strip updates to existing remote heads from the new heads list
234 253 remotebookmarks = remote.listkeys('bookmarks')
235 254 bookmarkedheads = set()
236 255 for bm in localbookmarks:
237 256 rnode = remotebookmarks.get(bm)
238 257 if rnode and rnode in repo:
239 258 lctx, rctx = repo[bm], repo[rnode]
240 259 if rctx == lctx.ancestor(rctx):
241 260 bookmarkedheads.add(lctx.node())
242 261 dhs = list(newhs - bookmarkedheads - oldhs)
243 262 if dhs:
244 263 if error is None:
245 264 if branch not in ('default', None):
246 265 error = _("push creates new remote head %s "
247 266 "on branch '%s'!") % (short(dhs[0]), branch)
248 267 else:
249 268 error = _("push creates new remote head %s!"
250 269 ) % short(dhs[0])
251 270 if branch in unsynced:
252 271 hint = _("you should pull and merge or "
253 272 "use push -f to force")
254 273 else:
255 274 hint = _("did you forget to merge? "
256 275 "use push -f to force")
257 276 if branch is not None:
258 277 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
259 278 for h in dhs:
260 279 repo.ui.note(_("new remote head %s\n") % short(h))
261 280 if error:
262 281 raise util.Abort(error, hint=hint)
263 282
264 283 # 6. Check for unsynced changes on involved branches.
265 284 if unsynced:
266 285 repo.ui.warn(_("note: unsynced remote changes!\n"))
267 286
268 287 def visibleheads(repo):
269 288 """return the set of visible head of this repo"""
270 289 # XXX we want a cache on this
271 290 sroots = repo._phasecache.phaseroots[phases.secret]
272 291 if sroots or repo.obsstore:
273 292 # XXX very slow revset. storing heads or secret "boundary"
274 293 # would help.
275 294 revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
276 295
277 296 vheads = [ctx.node() for ctx in revset]
278 297 if not vheads:
279 298 vheads.append(nullid)
280 299 else:
281 300 vheads = repo.heads()
282 301 return vheads
283 302
284 303
285 304 def visiblebranchmap(repo):
286 305 """return a branchmap for the visible set"""
287 306 # XXX Recomputing this data on the fly is very slow. We should build a
288 307 # XXX cached version while computing the standard branchmap version.
289 308 sroots = repo._phasecache.phaseroots[phases.secret]
290 309 if sroots or repo.obsstore:
291 310 vbranchmap = {}
292 311 for branch, nodes in repo.branchmap().iteritems():
293 312 # search for secret heads.
294 313 for n in nodes:
295 314 if repo[n].phase() >= phases.secret:
296 315 nodes = None
297 316 break
298 317 # if secret heads were found we must compute them again
299 318 if nodes is None:
300 319 s = repo.set('heads(branch(%s) - secret() - extinct())',
301 320 branch)
302 321 nodes = [c.node() for c in s]
303 322 vbranchmap[branch] = nodes
304 323 else:
305 324 vbranchmap = repo.branchmap()
306 325 return vbranchmap
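A rough standalone sketch of the idea behind visiblebranchmap: keep each branch's head list untouched unless a secret head is found, and only then rebuild it. The real code rebuilds the heads with a revset so that ancestors uncovered by removing a secret head reappear; this simplified version only drops the secret nodes (visiblemap, secretnodes and the sample data are invented):

    def visiblemap(branchmap, secretnodes):
        """Filter secret heads out of a {branch: [nodes]} mapping (illustrative)."""
        vbranchmap = {}
        for branch, nodes in branchmap.items():
            if any(n in secretnodes for n in nodes):
                # a secret head was found: this branch's heads must be recomputed
                nodes = [n for n in nodes if n not in secretnodes]
            vbranchmap[branch] = list(nodes)
        return vbranchmap

    bm = {'default': ['n1', 's2'], 'stable': ['n7']}
    print(visiblemap(bm, set(['s2'])))   # {'default': ['n1'], 'stable': ['n7']}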