##// END OF EJS Templates
checkheads: simplify the structure build by preprocessing...
Pierre-Yves David -
r17211:4f321eec default
parent child Browse files
Show More
@@ -1,325 +1,343 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases
11 11
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        # old server without getbundle: use the tree-walking discovery
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        # shortcut: if every requested head is already known locally there
        # is nothing incoming and no discovery round-trip is needed
        allknown = True
        nm = repo.changelog.nodemap
        for h in heads:
            if nm.get(h) is None:
                allknown = False
                break
        if allknown:
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))
48 48
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, revlog, commonheads, missingheads):
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = revlog
        # _common/_missing are filled lazily by _computecommonmissing
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # derive the full common/missing node sets from the head lists
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
88 88
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo.changelog, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = visibleheads(repo)
        # extinct changesets are silently ignored
        og.excluded = [ctx.node() for ctx in repo.set('secret()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if not ctx.extinct():
                # extinct changesets are silently ignored
                if ctx.phase() >= phases.secret:
                    excluded.append(node)
                else:
                    missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og
151 151
def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. register remote heads, split per branch into those known locally
    #    and those not yet synced
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        for h in heads:
            if h in cl.nodemap:
                known.append(h)
            else:
                unsynced.append(h)
        # (remoteheads, newheads-so-far, unsyncedheads); newheads starts as a
        # copy of the known remote heads and is updated in step D
        headssum[branch] = (known, list(known), unsynced)
    # B. add new branch data for branches only present in the outgoing set
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            # remoteheads is None: the branch does not exist remotely yet
            headssum[branch] = (None, [], [])

    # C. drop data about untouched branches: they won't be affected by the push
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
                  if heads[0] is not None)
    repo._updatebranchcache(newmap, missingctx)
    for branch, newheads in newmap.iteritems():
        # update the list in place so the tuple stored in headssum sees it
        headssum[branch][1][:] = newheads
    return headssum
192 204
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute the heads summary for a repo without branchmap support

    Returns the same {branch: (remoteheads, newheads, unsyncedheads)} mapping
    as _headssummary, collapsed onto the single topological branch None.
    """
    cl = repo.changelog
    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on _updatebranchcache)
    oldheads = set(h for h in remoteheads if h in cl.nodemap)
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = list(c.node() for c in r)
    # incoming changes mean the (sole) branch is unsynced; use an explicit
    # conditional instead of the fragile `cond and a or b` idiom
    unsynced = set([None]) if inc else set()
    return {None: (oldheads, newheads, unsynced)}
212 222
def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    # a branch is new when no remote head is known locally (remoteheads None)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise util.Abort(_("push creates new remote branches: %s!")
                           % branchnames,
                         hint=_("use 'hg push --new-branch' to create"
                                " new remote branches"))

    # 2. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    localbookmarks = repo._bookmarks

    unsynced = False
    for branch, heads in headssum.iteritems():
        if heads[0] is None:
            # Maybe we should abort if we push more that one head
            # for new branches ?
            continue
        if heads[2]:
            unsynced = True
        oldhs = set(heads[0])
        newhs = set(heads[1])
        dhs = None
        if len(newhs) > len(oldhs):
            # remote heads moved forward by a bookmark push do not count as
            # new heads: collect the locally-known nodes of shared bookmarks
            remotebookmarks = remote.listkeys('bookmarks')
            bookmarkedheads = set()
            for bm in localbookmarks:
                rnode = remotebookmarks.get(bm)
                if rnode and rnode in repo:
                    lctx, rctx = repo[bm], repo[rnode]
                    if rctx == lctx.ancestor(rctx):
                        bookmarkedheads.add(lctx.node())
            # strip updates to existing remote heads from the new heads list
            dhs = list(newhs - bookmarkedheads - oldhs)
        if dhs:
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                else:
                    error = _("push creates new remote head %s!"
                             ) % short(dhs[0])
                if heads[2]: # unsynced
                    hint = _("you should pull and merge or "
                             "use push -f to force")
                else:
                    hint = _("did you forget to merge? "
                             "use push -f to force")
            if branch is not None:
                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
            for h in dhs:
                repo.ui.note(_("new remote head %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)

    # 6. Check for unsynced changes on involved branches.
    if unsynced:
        repo.ui.warn(_("note: unsynced remote changes!\n"))
286 304
def visibleheads(repo):
    """return the set of visible heads of this repo"""
    # XXX we want a cache on this
    sroots = repo._phasecache.phaseroots[phases.secret]
    if sroots or repo.obsstore:
        # XXX very slow revset. storing heads or secret "boundary"
        # would help.
        revset = repo.set('heads(not (%ln:: + extinct()))', sroots)

        vheads = [ctx.node() for ctx in revset]
        if not vheads:
            # an entirely secret/extinct repo still has the null head visible
            vheads.append(nullid)
    else:
        # nothing hidden: plain repo heads are the visible heads
        vheads = repo.heads()
    return vheads
302 320
303 321
def visiblebranchmap(repo):
    """return a branchmap for the visible set"""
    # XXX Recomputing this data on the fly is very slow. We should build a
    # XXX cached version while computing the standard branchmap version.
    sroots = repo._phasecache.phaseroots[phases.secret]
    if sroots or repo.obsstore:
        vbranchmap = {}
        for branch, nodes in repo.branchmap().iteritems():
            # search for secret heads.
            for n in nodes:
                if repo[n].phase() >= phases.secret:
                    nodes = None
                    break
            # if secret heads were found we must compute them again
            if nodes is None:
                s = repo.set('heads(branch(%s) - secret() - extinct())',
                             branch)
                nodes = [c.node() for c in s]
            vbranchmap[branch] = nodes
    else:
        # no secret or obsolete changesets: the plain branchmap is correct
        vbranchmap = repo.branchmap()
    return vbranchmap
General Comments 0
You need to be logged in to leave comments. Login now