discovery: drop the visibleheads function...
Pierre-Yves David
r18284:e4be4e2a default
@@ -1,343 +1,339 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
11 11 import branchmap
12 12
13 13 def findcommonincoming(repo, remote, heads=None, force=False):
14 14 """Return a tuple (common, anyincoming, heads) used to identify the common
15 15 subset of nodes between repo and remote.
16 16
17 17 "common" is a list of (at least) the heads of the common subset.
18 18 "anyincoming" is testable as a boolean indicating if any nodes are missing
19 19 locally. If remote does not support getbundle, this actually is a list of
20 20 roots of the nodes that would be incoming, to be supplied to
21 21 changegroupsubset. No code except for pull should be relying on this fact
22 22 any longer.
23 23 "heads" is either the supplied heads, or else the remote's heads.
24 24
25 25 If you pass heads and they are all known locally, the response lists just
26 26 these heads in "common" and in "heads".
27 27
28 28 Please use findcommonoutgoing to compute the set of outgoing nodes to give
29 29 extensions a good hook into outgoing.
30 30 """
31 31
32 32 if not remote.capable('getbundle'):
33 33 return treediscovery.findcommonincoming(repo, remote, heads, force)
34 34
35 35 if heads:
36 36 allknown = True
37 37 nm = repo.changelog.nodemap
38 38 for h in heads:
39 39 if nm.get(h) is None:
40 40 allknown = False
41 41 break
42 42 if allknown:
43 43 return (heads, False, heads)
44 44
45 45 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
46 46 abortwhenunrelated=not force)
47 47 common, anyinc, srvheads = res
48 48 return (list(common), anyinc, heads or list(srvheads))
49 49
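For illustration, a minimal sketch of how a caller might drive findcommonincoming (the repository path and remote URL below are placeholders, and the surrounding setup is an assumption rather than code from this file):

    from mercurial import ui as uimod, hg, discovery

    myui = uimod.ui()
    repo = hg.repository(myui, '.')             # assumes the cwd is a Mercurial repository
    remote = hg.peer(repo, {}, 'http://example.com/repo')  # hypothetical remote
    common, anyinc, heads = discovery.findcommonincoming(repo, remote)
    if anyinc:
        myui.status("remote has changesets missing locally\n")
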
50 50 class outgoing(object):
51 51 '''Represents the set of nodes present in a local repo but not in a
52 52 (possibly) remote one.
53 53
54 54 Members:
55 55
56 56 missing is a list of all nodes present in local but not in remote.
57 57 common is a list of all nodes shared between the two repos.
58 58 excluded is the list of missing changesets that shouldn't be sent remotely.
59 59 missingheads is the list of heads of missing.
60 60 commonheads is the list of heads of common.
61 61
62 62 The sets are computed on demand from the heads, unless provided upfront
63 63 by discovery.'''
64 64
65 65 def __init__(self, revlog, commonheads, missingheads):
66 66 self.commonheads = commonheads
67 67 self.missingheads = missingheads
68 68 self._revlog = revlog
69 69 self._common = None
70 70 self._missing = None
71 71 self.excluded = []
72 72
73 73 def _computecommonmissing(self):
74 74 sets = self._revlog.findcommonmissing(self.commonheads,
75 75 self.missingheads)
76 76 self._common, self._missing = sets
77 77
78 78 @util.propertycache
79 79 def common(self):
80 80 if self._common is None:
81 81 self._computecommonmissing()
82 82 return self._common
83 83
84 84 @util.propertycache
85 85 def missing(self):
86 86 if self._missing is None:
87 87 self._computecommonmissing()
88 88 return self._missing
89 89
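As the class docstring notes, the common and missing sets are computed lazily. A small sketch (assuming `repo` already exists and `commonheads`/`missingheads` are node lists obtained from discovery; nothing here is part of this file):

    from mercurial import discovery

    og = discovery.outgoing(repo.changelog, commonheads, missingheads)
    # nothing is computed yet; the first access below triggers
    # _computecommonmissing() through util.propertycache
    nodes_to_send = og.missing
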
90 90 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
91 91 commoninc=None, portable=False):
92 92 '''Return an outgoing instance to identify the nodes present in repo but
93 93 not in other.
94 94
95 95 If onlyheads is given, only nodes ancestral to nodes in onlyheads
96 96 (inclusive) are included. If you already know the local repo's heads,
97 97 passing them in onlyheads is faster than letting them be recomputed here.
98 98
99 99 If commoninc is given, it must be the result of a prior call to
100 100 findcommonincoming(repo, other, force) to avoid recomputing it here.
101 101
102 102 If portable is given, compute more conservative common and missingheads,
103 103 to make bundles created from the instance more portable.'''
104 104 # declare an empty outgoing object to be filled later
105 105 og = outgoing(repo.changelog, None, None)
106 106
107 107 # get common set if not provided
108 108 if commoninc is None:
109 109 commoninc = findcommonincoming(repo, other, force=force)
110 110 og.commonheads, _any, _hds = commoninc
111 111
112 112 # compute outgoing
113 113 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
114 114 if not mayexclude:
115 115 og.missingheads = onlyheads or repo.heads()
116 116 elif onlyheads is None:
117 117 # use visible heads as it should be cached
118 118 og.missingheads = repo.filtered("unserved").heads()
119 119 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
120 120 else:
121 121 # compute common, missing and exclude secret stuff
122 122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
123 123 og._common, allmissing = sets
124 124 og._missing = missing = []
125 125 og.excluded = excluded = []
126 126 for node in allmissing:
127 127 ctx = repo[node]
128 128 if ctx.phase() >= phases.secret or ctx.extinct():
129 129 excluded.append(node)
130 130 else:
131 131 missing.append(node)
132 132 if len(missing) == len(allmissing):
133 133 missingheads = onlyheads
134 134 else: # update missing heads
135 135 missingheads = phases.newheads(repo, onlyheads, excluded)
136 136 og.missingheads = missingheads
137 137 if portable:
138 138 # recompute common and missingheads as if -r<rev> had been given for
139 139 # each head of missing, and --base <rev> for each head of the proper
140 140 # ancestors of missing
141 141 og._computecommonmissing()
142 142 cl = repo.changelog
143 143 missingrevs = set(cl.rev(n) for n in og._missing)
144 144 og._common = set(cl.ancestors(missingrevs)) - missingrevs
145 145 commonheads = set(og.commonheads)
146 146 og.missingheads = [h for h in og.missingheads if h not in commonheads]
147 147
148 148 return og
149 149
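A sketch of the usual call pattern (again assuming the `repo` and `remote` objects from the earlier example); passing a previously computed commoninc avoids running discovery twice:

    from mercurial import discovery

    commoninc = discovery.findcommonincoming(repo, remote)
    out = discovery.findcommonoutgoing(repo, remote, commoninc=commoninc)
    # out.missing lists the nodes a push would send,
    # out.excluded the secret or extinct changesets held back
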
150 150 def _headssummary(repo, remote, outgoing):
151 151 """compute a summary of branch and heads status before and after push
152 152
153 153 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
154 154
155 155 - branch: the branch name
156 156 - remoteheads: the list of remote heads known locally
157 157 None if the branch is new
158 158 - newheads: the new remote heads (known locally) with outgoing pushed
159 159 - unsyncedheads: the list of remote heads unknown locally.
160 160 """
161 161 cl = repo.changelog
162 162 headssum = {}
163 163 # A. Create set of branches involved in the push.
164 164 branches = set(repo[n].branch() for n in outgoing.missing)
165 165 remotemap = remote.branchmap()
166 166 newbranches = branches - set(remotemap)
167 167 branches.difference_update(newbranches)
168 168
169 169 # B. register remote heads
170 170 remotebranches = set()
171 171 for branch, heads in remote.branchmap().iteritems():
172 172 remotebranches.add(branch)
173 173 known = []
174 174 unsynced = []
175 175 for h in heads:
176 176 if h in cl.nodemap:
177 177 known.append(h)
178 178 else:
179 179 unsynced.append(h)
180 180 headssum[branch] = (known, list(known), unsynced)
181 181 # C. add new branch data
182 182 missingctx = list(repo[n] for n in outgoing.missing)
183 183 touchedbranches = set()
184 184 for ctx in missingctx:
185 185 branch = ctx.branch()
186 186 touchedbranches.add(branch)
187 187 if branch not in headssum:
188 188 headssum[branch] = (None, [], [])
189 189
190 190 # D. drop data about untouched branches:
191 191 for branch in remotebranches - touchedbranches:
192 192 del headssum[branch]
193 193
194 194 # E. Update newmap with outgoing changes.
195 195 # This will possibly add new heads and remove existing ones.
196 196 newmap = branchmap.branchcache((branch, heads[1])
197 197 for branch, heads in headssum.iteritems()
198 198 if heads[0] is not None)
199 199 newmap.update(repo, missingctx)
200 200 for branch, newheads in newmap.iteritems():
201 201 headssum[branch][1][:] = newheads
202 202 return headssum
203 203
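The returned mapping can be consumed as in this hypothetical report loop (not code from this file; `repo`, `remote` and `outgoing` are assumed as in the earlier examples):

    headssum = _headssummary(repo, remote, outgoing)
    for branch, (remoteheads, newheads, unsyncedheads) in headssum.iteritems():
        if remoteheads is None:
            repo.ui.note("branch %s is new on the remote\n" % branch)
        elif len(newheads) > len(remoteheads):
            repo.ui.note("push would add a head on branch %s\n" % branch)
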
204 204 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
205 205 """Compute branchmapsummary for repo without branchmap support"""
206 206
207 207 cl = repo.changelog
208 208 # 1-4b. old servers: Check for new topological heads.
209 209 # Construct {old,new}map with branch = None (topological branch).
210 210 # (code based on update)
211 211 oldheads = set(h for h in remoteheads if h in cl.nodemap)
212 212 # all nodes in outgoing.missing are children of either:
213 213 # - an element of oldheads
214 214 # - another element of outgoing.missing
215 215 # - nullrev
216 216 # This explains why the new heads are very simple to compute.
217 217 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
218 218 newheads = list(c.node() for c in r)
219 219 unsynced = inc and set([None]) or set()
220 220 return {None: (oldheads, newheads, unsynced)}
221 221
222 222 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
223 223 """Check that a push won't add any outgoing head
224 224
225 225 raise Abort error and display ui message as needed.
226 226 """
227 227 # Check for each named branch if we're creating new remote heads.
228 228 # To be a remote head after push, node must be either:
229 229 # - unknown locally
230 230 # - a local outgoing head descended from update
231 231 # - a remote head that's known locally and not
232 232 # ancestral to an outgoing head
233 233 if remoteheads == [nullid]:
234 234 # remote is empty, nothing to check.
235 235 return
236 236
237 237 if remote.capable('branchmap'):
238 238 headssum = _headssummary(repo, remote, outgoing)
239 239 else:
240 240 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
241 241 newbranches = [branch for branch, heads in headssum.iteritems()
242 242 if heads[0] is None]
243 243 # 1. Check for new branches on the remote.
244 244 if newbranches and not newbranch: # new branch requires --new-branch
245 245 branchnames = ', '.join(sorted(newbranches))
246 246 raise util.Abort(_("push creates new remote branches: %s!")
247 247 % branchnames,
248 248 hint=_("use 'hg push --new-branch' to create"
249 249 " new remote branches"))
250 250
251 251 # 2. Compute newly pushed bookmarks. We
252 252 # don't warn about bookmarked heads.
253 253 localbookmarks = repo._bookmarks
254 254 remotebookmarks = remote.listkeys('bookmarks')
255 255 bookmarkedheads = set()
256 256 for bm in localbookmarks:
257 257 rnode = remotebookmarks.get(bm)
258 258 if rnode and rnode in repo:
259 259 lctx, rctx = repo[bm], repo[rnode]
260 260 if bookmarks.validdest(repo, rctx, lctx):
261 261 bookmarkedheads.add(lctx.node())
262 262
263 263 # 3. Check for new heads.
264 264 # If there are more heads after the push than before, a suitable
265 265 # error message, depending on unsynced status, is displayed.
266 266 error = None
267 267 unsynced = False
268 268 allmissing = set(outgoing.missing)
269 269 allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
270 270 allfuturecommon.update(allmissing)
271 271 for branch, heads in headssum.iteritems():
272 272 if heads[0] is None:
273 273 # Maybe we should abort if we push more than one head
274 274 # for new branches?
275 275 continue
276 276 candidate_newhs = set(heads[1])
277 277 # add unsynced data
278 278 oldhs = set(heads[0])
279 279 oldhs.update(heads[2])
280 280 candidate_newhs.update(heads[2])
281 281 dhs = None
282 282 discardedheads = set()
283 283 if repo.obsstore:
284 284 # remove future heads which are actually obsoleted by another
285 285 # pushed element:
286 286 #
287 287 # XXX as above, there are several cases this code does not handle
288 288 # XXX properly
289 289 #
290 290 # (1) if <nh> is public, it won't be affected by obsolete markers
291 291 # and a new head is created
292 292 #
293 293 # (2) if the new heads have ancestors which are not obsolete and
294 294 # not ancestors of any other heads, we will have a new head too.
295 295 #
296 296 # These two cases will be easy to handle for known changesets but
297 297 # much more tricky for unsynced changes.
298 298 newhs = set()
299 299 for nh in candidate_newhs:
300 300 if nh in repo and repo[nh].phase() <= phases.public:
301 301 newhs.add(nh)
302 302 else:
303 303 for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
304 304 if suc != nh and suc in allfuturecommon:
305 305 discardedheads.add(nh)
306 306 break
307 307 else:
308 308 newhs.add(nh)
309 309 else:
310 310 newhs = candidate_newhs
311 311 if [h for h in heads[2] if h not in discardedheads]:
312 312 unsynced = True
313 313 if len(newhs) > len(oldhs):
314 314 # strip updates to existing remote heads from the new heads list
315 315 dhs = list(newhs - bookmarkedheads - oldhs)
316 316 if dhs:
317 317 if error is None:
318 318 if branch not in ('default', None):
319 319 error = _("push creates new remote head %s "
320 320 "on branch '%s'!") % (short(dhs[0]), branch)
321 321 else:
322 322 error = _("push creates new remote head %s!"
323 323 ) % short(dhs[0])
324 324 if heads[2]: # unsynced
325 325 hint = _("you should pull and merge or "
326 326 "use push -f to force")
327 327 else:
328 328 hint = _("did you forget to merge? "
329 329 "use push -f to force")
330 330 if branch is not None:
331 331 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
332 332 for h in dhs:
333 333 repo.ui.note(_("new remote head %s\n") % short(h))
334 334 if error:
335 335 raise util.Abort(error, hint=hint)
336 336
337 337 # 6. Check for unsynced changes on involved branches.
338 338 if unsynced:
339 339 repo.ui.warn(_("note: unsynced remote changes!\n"))
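In the push path this function acts as the final guard; a sketch of its use (assuming `out` and `remoteheads` were obtained as in the earlier examples, with remoteheads coming from the remote peer):

    from mercurial import discovery, util

    try:
        discovery.checkheads(repo, remote, out, remoteheads,
                             newbranch=False, inc=False)
    except util.Abort as err:
        repo.ui.warn("push rejected: %s\n" % err)
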
340
341 def visibleheads(repo):
342 """return the set of visible head of this repo"""
343 return repo.filtered('unserved').heads()
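This revision drops the visibleheads helper shown just above. Callers are expected to use repository filtering directly, as findcommonoutgoing already does in this file; a minimal equivalent sketch, assuming `repo` is a local repository object:

    # same result as the removed helper: the heads still visible once
    # secret and other unserved changesets are filtered out
    visible = repo.filtered('unserved').heads()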