##// END OF EJS Templates
checkheads: extract bookmark computation from the branch loop...
Pierre-Yves David -
r17212:246131d6 default
parent child Browse files
Show More
@@ -1,343 +1,345 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases
11 11
12 12 def findcommonincoming(repo, remote, heads=None, force=False):
13 13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 14 subset of nodes between repo and remote.
15 15
16 16 "common" is a list of (at least) the heads of the common subset.
17 17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 18 locally. If remote does not support getbundle, this actually is a list of
19 19 roots of the nodes that would be incoming, to be supplied to
20 20 changegroupsubset. No code except for pull should be relying on this fact
21 21 any longer.
22 22 "heads" is either the supplied heads, or else the remote's heads.
23 23
24 24 If you pass heads and they are all known locally, the response lists just
25 25 these heads in "common" and in "heads".
26 26
27 27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 28 extensions a good hook into outgoing.
29 29 """
30 30
31 31 if not remote.capable('getbundle'):
32 32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 33
34 34 if heads:
35 35 allknown = True
36 36 nm = repo.changelog.nodemap
37 37 for h in heads:
38 38 if nm.get(h) is None:
39 39 allknown = False
40 40 break
41 41 if allknown:
42 42 return (heads, False, heads)
43 43
44 44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 45 abortwhenunrelated=not force)
46 46 common, anyinc, srvheads = res
47 47 return (list(common), anyinc, heads or list(srvheads))
48 48
49 49 class outgoing(object):
50 50 '''Represents the set of nodes present in a local repo but not in a
51 51 (possibly) remote one.
52 52
53 53 Members:
54 54
55 55 missing is a list of all nodes present in local but not in remote.
56 56 common is a list of all nodes shared between the two repos.
57 57 excluded is the list of missing changesets that shouldn't be sent remotely.
58 58 missingheads is the list of heads of missing.
59 59 commonheads is the list of heads of common.
60 60
61 61 The sets are computed on demand from the heads, unless provided upfront
62 62 by discovery.'''
63 63
64 64 def __init__(self, revlog, commonheads, missingheads):
65 65 self.commonheads = commonheads
66 66 self.missingheads = missingheads
67 67 self._revlog = revlog
68 68 self._common = None
69 69 self._missing = None
70 70 self.excluded = []
71 71
72 72 def _computecommonmissing(self):
73 73 sets = self._revlog.findcommonmissing(self.commonheads,
74 74 self.missingheads)
75 75 self._common, self._missing = sets
76 76
77 77 @util.propertycache
78 78 def common(self):
79 79 if self._common is None:
80 80 self._computecommonmissing()
81 81 return self._common
82 82
83 83 @util.propertycache
84 84 def missing(self):
85 85 if self._missing is None:
86 86 self._computecommonmissing()
87 87 return self._missing
88 88
89 89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 90 commoninc=None, portable=False):
91 91 '''Return an outgoing instance to identify the nodes present in repo but
92 92 not in other.
93 93
94 94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 95 (inclusive) are included. If you already know the local repo's heads,
96 96 passing them in onlyheads is faster than letting them be recomputed here.
97 97
98 98 If commoninc is given, it must be the result of a prior call to
99 99 findcommonincoming(repo, other, force) to avoid recomputing it here.
100 100
101 101 If portable is given, compute more conservative common and missingheads,
102 102 to make bundles created from the instance more portable.'''
103 103 # declare an empty outgoing object to be filled later
104 104 og = outgoing(repo.changelog, None, None)
105 105
106 106 # get common set if not provided
107 107 if commoninc is None:
108 108 commoninc = findcommonincoming(repo, other, force=force)
109 109 og.commonheads, _any, _hds = commoninc
110 110
111 111 # compute outgoing
112 112 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
113 113 if not mayexclude:
114 114 og.missingheads = onlyheads or repo.heads()
115 115 elif onlyheads is None:
116 116 # use visible heads as it should be cached
117 117 og.missingheads = visibleheads(repo)
118 118 # extinct changesets are silently ignored
119 119 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
120 120 else:
121 121 # compute common, missing and exclude secret stuff
122 122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
123 123 og._common, allmissing = sets
124 124 og._missing = missing = []
125 125 og.excluded = excluded = []
126 126 for node in allmissing:
127 127 ctx = repo[node]
128 128 if not ctx.extinct():
129 129 # extinct changesets are silently ignored
130 130 if ctx.phase() >= phases.secret:
131 131 excluded.append(node)
132 132 else:
133 133 missing.append(node)
134 134 if len(missing) == len(allmissing):
135 135 missingheads = onlyheads
136 136 else: # update missing heads
137 137 missingheads = phases.newheads(repo, onlyheads, excluded)
138 138 og.missingheads = missingheads
139 139 if portable:
140 140 # recompute common and missingheads as if -r<rev> had been given for
141 141 # each head of missing, and --base <rev> for each head of the proper
142 142 # ancestors of missing
143 143 og._computecommonmissing()
144 144 cl = repo.changelog
145 145 missingrevs = set(cl.rev(n) for n in og._missing)
146 146 og._common = set(cl.ancestors(missingrevs)) - missingrevs
147 147 commonheads = set(og.commonheads)
148 148 og.missingheads = [h for h in og.missingheads if h not in commonheads]
149 149
150 150 return og
151 151
152 152 def _headssummary(repo, remote, outgoing):
153 153 """compute a summary of branch and heads status before and after push
154 154
155 155 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
156 156
157 157 - branch: the branch name
158 158 - remoteheads: the list of remote heads known locally
159 159 None if the branch is new
160 160 - newheads: the new remote heads (known locally) with outgoing pushed
161 161 - unsyncedheads: the list of remote heads unknown locally.
162 162 """
163 163 cl = repo.changelog
164 164 headssum = {}
165 165 # A. Create set of branches involved in the push.
166 166 branches = set(repo[n].branch() for n in outgoing.missing)
167 167 remotemap = remote.branchmap()
168 168 newbranches = branches - set(remotemap)
169 169 branches.difference_update(newbranches)
170 170
171 171 # B. register remote heads
172 172 remotebranches = set()
173 173 for branch, heads in remote.branchmap().iteritems():
174 174 remotebranches.add(branch)
175 175 known = []
176 176 unsynced = []
177 177 for h in heads:
178 178 if h in cl.nodemap:
179 179 known.append(h)
180 180 else:
181 181 unsynced.append(h)
182 182 headssum[branch] = (known, list(known), unsynced)
183 183 # C. add new branch data
184 184 missingctx = list(repo[n] for n in outgoing.missing)
185 185 touchedbranches = set()
186 186 for ctx in missingctx:
187 187 branch = ctx.branch()
188 188 touchedbranches.add(branch)
189 189 if branch not in headssum:
190 190 headssum[branch] = (None, [], [])
191 191
192 192 # D. drop data about untouched branches:
193 193 for branch in remotebranches - touchedbranches:
194 194 del headssum[branch]
195 195
196 196 # E. Update newmap with outgoing changes.
197 197 # This will possibly add new heads and remove existing ones.
198 198 newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
199 199 if heads[0] is not None)
200 200 repo._updatebranchcache(newmap, missingctx)
201 201 for branch, newheads in newmap.iteritems():
202 202 headssum[branch][1][:] = newheads
203 203 return headssum
204 204
205 205 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
206 206 """Compute branchmapsummary for repo without branchmap support"""
207 207
208 208 cl = repo.changelog
209 209 # 1-4b. old servers: Check for new topological heads.
210 210 # Construct {old,new}map with branch = None (topological branch).
211 211 # (code based on _updatebranchcache)
212 212 oldheads = set(h for h in remoteheads if h in cl.nodemap)
213 213 # all nodes in outgoing.missing are children of either:
214 214 # - an element of oldheads
215 215 # - another element of outgoing.missing
216 216 # - nullrev
217 217 # This explains why the new heads are very simple to compute.
218 218 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
219 219 newheads = list(c.node() for c in r)
220 220 unsynced = inc and set([None]) or set()
221 221 return {None: (oldheads, newheads, unsynced)}
222 222
223 223 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
224 224 """Check that a push won't add any outgoing head
225 225
226 226 raise Abort error and display ui message as needed.
227 227 """
228 228 # Check for each named branch if we're creating new remote heads.
229 229 # To be a remote head after push, node must be either:
230 230 # - unknown locally
231 231 # - a local outgoing head descended from update
232 232 # - a remote head that's known locally and not
233 233 # ancestral to an outgoing head
234 234 if remoteheads == [nullid]:
235 235 # remote is empty, nothing to check.
236 236 return
237 237
238 238 if remote.capable('branchmap'):
239 239 headssum = _headssummary(repo, remote, outgoing)
240 240 else:
241 241 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
242 242 newbranches = [branch for branch, heads in headssum.iteritems()
243 243 if heads[0] is None]
244 244 # 1. Check for new branches on the remote.
245 245 if newbranches and not newbranch: # new branch requires --new-branch
246 246 branchnames = ', '.join(sorted(newbranches))
247 247 raise util.Abort(_("push creates new remote branches: %s!")
248 248 % branchnames,
249 249 hint=_("use 'hg push --new-branch' to create"
250 250 " new remote branches"))
251 251
252 # 2. Check for new heads.
252 # 2. compute newly pushed bookmarks. We
253 # don't warn about bookmarked heads.
254 localbookmarks = repo._bookmarks
255 remotebookmarks = remote.listkeys('bookmarks')
256 bookmarkedheads = set()
257 for bm in localbookmarks:
258 rnode = remotebookmarks.get(bm)
259 if rnode and rnode in repo:
260 lctx, rctx = repo[bm], repo[rnode]
261 if rctx == lctx.ancestor(rctx):
262 bookmarkedheads.add(lctx.node())
263
264 # 3. Check for new heads.
253 265 # If there are more heads after the push than before, a suitable
254 266 # error message, depending on unsynced status, is displayed.
255 267 error = None
256 localbookmarks = repo._bookmarks
257
258 268 unsynced = False
259 269 for branch, heads in headssum.iteritems():
260 270 if heads[0] is None:
261 271 # Maybe we should abort if we push more than one head
262 272 # for new branches ?
263 273 continue
264 274 if heads[2]:
265 275 unsynced = True
266 276 oldhs = set(heads[0])
267 277 newhs = set(heads[1])
268 278 dhs = None
269 279 if len(newhs) > len(oldhs):
270 remotebookmarks = remote.listkeys('bookmarks')
271 bookmarkedheads = set()
272 for bm in localbookmarks:
273 rnode = remotebookmarks.get(bm)
274 if rnode and rnode in repo:
275 lctx, rctx = repo[bm], repo[rnode]
276 if rctx == lctx.ancestor(rctx):
277 bookmarkedheads.add(lctx.node())
278 280 # strip updates to existing remote heads from the new heads list
279 281 dhs = list(newhs - bookmarkedheads - oldhs)
280 282 if dhs:
281 283 if error is None:
282 284 if branch not in ('default', None):
283 285 error = _("push creates new remote head %s "
284 286 "on branch '%s'!") % (short(dhs[0]), branch)
285 287 else:
286 288 error = _("push creates new remote head %s!"
287 289 ) % short(dhs[0])
288 290 if heads[2]: # unsynced
289 291 hint = _("you should pull and merge or "
290 292 "use push -f to force")
291 293 else:
292 294 hint = _("did you forget to merge? "
293 295 "use push -f to force")
294 296 if branch is not None:
295 297 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
296 298 for h in dhs:
297 299 repo.ui.note(_("new remote head %s\n") % short(h))
298 300 if error:
299 301 raise util.Abort(error, hint=hint)
300 302
301 303 # 4. Check for unsynced changes on involved branches.
302 304 if unsynced:
303 305 repo.ui.warn(_("note: unsynced remote changes!\n"))
304 306
305 307 def visibleheads(repo):
306 308 """return the set of visible heads of this repo"""
307 309 # XXX we want a cache on this
308 310 sroots = repo._phasecache.phaseroots[phases.secret]
309 311 if sroots or repo.obsstore:
310 312 # XXX very slow revset. storing heads or secret "boundary"
311 313 # would help.
312 314 revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
313 315
314 316 vheads = [ctx.node() for ctx in revset]
315 317 if not vheads:
316 318 vheads.append(nullid)
317 319 else:
318 320 vheads = repo.heads()
319 321 return vheads
320 322
321 323
322 324 def visiblebranchmap(repo):
323 325 """return a branchmap for the visible set"""
324 326 # XXX Recomputing this data on the fly is very slow. We should build a
325 327 # XXX cached version while computing the standard branchmap version.
326 328 sroots = repo._phasecache.phaseroots[phases.secret]
327 329 if sroots or repo.obsstore:
328 330 vbranchmap = {}
329 331 for branch, nodes in repo.branchmap().iteritems():
330 332 # search for secret heads.
331 333 for n in nodes:
332 334 if repo[n].phase() >= phases.secret:
333 335 nodes = None
334 336 break
335 337 # if secret heads were found we must compute them again
336 338 if nodes is None:
337 339 s = repo.set('heads(branch(%s) - secret() - extinct())',
338 340 branch)
339 341 nodes = [c.node() for c in s]
340 342 vbranchmap[branch] = nodes
341 343 else:
342 344 vbranchmap = repo.branchmap()
343 345 return vbranchmap
General Comments 0
You need to be logged in to leave comments. Login now