discovery: prevent crash on unknown remote heads with old repo (issue4337)...
Pierre-Yves David
r22178:70383c69 stable
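The change below addresses issue4337: when the remote lacks the 'branchmap' capability, _oldheadssummary() reports unsynced changes with a bare None placeholder instead of real node ids, and checkheads() previously passed that placeholder to short() while building the "remote has heads that are not known locally" warning, which is the crash path this guard removes. The patch adds a 'None in unsynced' check and falls back to a generic message. Below is a minimal standalone sketch of that guard; short() is re-implemented just for the example and the helper describe_unsynced is hypothetical, not part of Mercurial's API.

# Minimal standalone sketch (not Mercurial code) of the guard added in
# checkheads(): an old remote's unsynced-heads placeholder (None) must not
# reach short(), which expects a 20-byte binary node id.
from binascii import hexlify

def short(node):
    # same abbreviation as mercurial.node.short: first 12 hex digits
    return hexlify(node)[:12].decode('ascii')

def describe_unsynced(unsynced, verbose=False):
    # hypothetical helper mirroring the patched message-building logic
    if None in unsynced:
        # old remote, no heads data -> caller prints the generic warning
        return None
    elif len(unsynced) <= 4 or verbose:
        return ' '.join(short(h) for h in unsynced)
    else:
        return (' '.join(short(h) for h in unsynced[:4]) +
                ' and %d others' % (len(unsynced) - 4))

print(describe_unsynced([None]))          # None -> generic "not known locally" message
print(describe_unsynced([b'\x12' * 20]))  # '121212121212'

In the patched code itself, checkheads() keeps heads as either a string or None and picks among three ui.status() messages accordingly, as the diff shows.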
@@ -1,366 +1,373 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
11 11 import branchmap
12 12
13 13 def findcommonincoming(repo, remote, heads=None, force=False):
14 14 """Return a tuple (common, anyincoming, heads) used to identify the common
15 15 subset of nodes between repo and remote.
16 16
17 17 "common" is a list of (at least) the heads of the common subset.
18 18 "anyincoming" is testable as a boolean indicating if any nodes are missing
19 19 locally. If remote does not support getbundle, this actually is a list of
20 20 roots of the nodes that would be incoming, to be supplied to
21 21 changegroupsubset. No code except for pull should be relying on this fact
22 22 any longer.
23 23 "heads" is either the supplied heads, or else the remote's heads.
24 24
25 25 If you pass heads and they are all known locally, the response lists just
26 26 these heads in "common" and in "heads".
27 27
28 28 Please use findcommonoutgoing to compute the set of outgoing nodes to give
29 29 extensions a good hook into outgoing.
30 30 """
31 31
32 32 if not remote.capable('getbundle'):
33 33 return treediscovery.findcommonincoming(repo, remote, heads, force)
34 34
35 35 if heads:
36 36 allknown = True
37 37 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
38 38 for h in heads:
39 39 if not knownnode(h):
40 40 allknown = False
41 41 break
42 42 if allknown:
43 43 return (heads, False, heads)
44 44
45 45 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
46 46 abortwhenunrelated=not force)
47 47 common, anyinc, srvheads = res
48 48 return (list(common), anyinc, heads or list(srvheads))
49 49
50 50 class outgoing(object):
51 51 '''Represents the set of nodes present in a local repo but not in a
52 52 (possibly) remote one.
53 53
54 54 Members:
55 55
56 56 missing is a list of all nodes present in local but not in remote.
57 57 common is a list of all nodes shared between the two repos.
58 58 excluded is the list of missing changesets that shouldn't be sent remotely.
59 59 missingheads is the list of heads of missing.
60 60 commonheads is the list of heads of common.
61 61
62 62 The sets are computed on demand from the heads, unless provided upfront
63 63 by discovery.'''
64 64
65 65 def __init__(self, revlog, commonheads, missingheads):
66 66 self.commonheads = commonheads
67 67 self.missingheads = missingheads
68 68 self._revlog = revlog
69 69 self._common = None
70 70 self._missing = None
71 71 self.excluded = []
72 72
73 73 def _computecommonmissing(self):
74 74 sets = self._revlog.findcommonmissing(self.commonheads,
75 75 self.missingheads)
76 76 self._common, self._missing = sets
77 77
78 78 @util.propertycache
79 79 def common(self):
80 80 if self._common is None:
81 81 self._computecommonmissing()
82 82 return self._common
83 83
84 84 @util.propertycache
85 85 def missing(self):
86 86 if self._missing is None:
87 87 self._computecommonmissing()
88 88 return self._missing
89 89
90 90 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
91 91 commoninc=None, portable=False):
92 92 '''Return an outgoing instance to identify the nodes present in repo but
93 93 not in other.
94 94
95 95 If onlyheads is given, only nodes ancestral to nodes in onlyheads
96 96 (inclusive) are included. If you already know the local repo's heads,
97 97 passing them in onlyheads is faster than letting them be recomputed here.
98 98
99 99 If commoninc is given, it must be the result of a prior call to
100 100 findcommonincoming(repo, other, force) to avoid recomputing it here.
101 101
102 102 If portable is given, compute more conservative common and missingheads,
103 103 to make bundles created from the instance more portable.'''
104 104 # declare an empty outgoing object to be filled later
105 105 og = outgoing(repo.changelog, None, None)
106 106
107 107 # get common set if not provided
108 108 if commoninc is None:
109 109 commoninc = findcommonincoming(repo, other, force=force)
110 110 og.commonheads, _any, _hds = commoninc
111 111
112 112 # compute outgoing
113 113 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
114 114 if not mayexclude:
115 115 og.missingheads = onlyheads or repo.heads()
116 116 elif onlyheads is None:
117 117 # use visible heads as it should be cached
118 118 og.missingheads = repo.filtered("served").heads()
119 119 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
120 120 else:
121 121 # compute common, missing and exclude secret stuff
122 122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
123 123 og._common, allmissing = sets
124 124 og._missing = missing = []
125 125 og.excluded = excluded = []
126 126 for node in allmissing:
127 127 ctx = repo[node]
128 128 if ctx.phase() >= phases.secret or ctx.extinct():
129 129 excluded.append(node)
130 130 else:
131 131 missing.append(node)
132 132 if len(missing) == len(allmissing):
133 133 missingheads = onlyheads
134 134 else: # update missing heads
135 135 missingheads = phases.newheads(repo, onlyheads, excluded)
136 136 og.missingheads = missingheads
137 137 if portable:
138 138 # recompute common and missingheads as if -r<rev> had been given for
139 139 # each head of missing, and --base <rev> for each head of the proper
140 140 # ancestors of missing
141 141 og._computecommonmissing()
142 142 cl = repo.changelog
143 143 missingrevs = set(cl.rev(n) for n in og._missing)
144 144 og._common = set(cl.ancestors(missingrevs)) - missingrevs
145 145 commonheads = set(og.commonheads)
146 146 og.missingheads = [h for h in og.missingheads if h not in commonheads]
147 147
148 148 return og
149 149
150 150 def _headssummary(repo, remote, outgoing):
151 151 """compute a summary of branch and heads status before and after push
152 152
153 153 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
154 154
155 155 - branch: the branch name
156 156 - remoteheads: the list of remote heads known locally
157 157 None if the branch is new
158 158 - newheads: the new remote heads (known locally) with outgoing pushed
159 159 - unsyncedheads: the list of remote heads unknown locally.
160 160 """
161 161 cl = repo.changelog
162 162 headssum = {}
163 163 # A. Create set of branches involved in the push.
164 164 branches = set(repo[n].branch() for n in outgoing.missing)
165 165 remotemap = remote.branchmap()
166 166 newbranches = branches - set(remotemap)
167 167 branches.difference_update(newbranches)
168 168
169 169 # A. register remote heads
170 170 remotebranches = set()
171 171 for branch, heads in remote.branchmap().iteritems():
172 172 remotebranches.add(branch)
173 173 known = []
174 174 unsynced = []
175 175 knownnode = cl.hasnode # do not use nodemap until it is filtered
176 176 for h in heads:
177 177 if knownnode(h):
178 178 known.append(h)
179 179 else:
180 180 unsynced.append(h)
181 181 headssum[branch] = (known, list(known), unsynced)
182 182 # B. add new branch data
183 183 missingctx = list(repo[n] for n in outgoing.missing)
184 184 touchedbranches = set()
185 185 for ctx in missingctx:
186 186 branch = ctx.branch()
187 187 touchedbranches.add(branch)
188 188 if branch not in headssum:
189 189 headssum[branch] = (None, [], [])
190 190
191 191 # C. drop data about untouched branches:
192 192 for branch in remotebranches - touchedbranches:
193 193 del headssum[branch]
194 194
195 195 # D. Update newmap with outgoing changes.
196 196 # This will possibly add new heads and remove existing ones.
197 197 newmap = branchmap.branchcache((branch, heads[1])
198 198 for branch, heads in headssum.iteritems()
199 199 if heads[0] is not None)
200 200 newmap.update(repo, (ctx.rev() for ctx in missingctx))
201 201 for branch, newheads in newmap.iteritems():
202 202 headssum[branch][1][:] = newheads
203 203 return headssum
204 204
205 205 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
206 206 """Compute branchmapsummary for repo without branchmap support"""
207 207
208 208 # 1-4b. old servers: Check for new topological heads.
209 209 # Construct {old,new}map with branch = None (topological branch).
210 210 # (code based on update)
211 211 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
212 212 oldheads = set(h for h in remoteheads if knownnode(h))
213 213 # all nodes in outgoing.missing are children of either:
214 214 # - an element of oldheads
215 215 # - another element of outgoing.missing
216 216 # - nullrev
217 217 # This explains why the new heads are very simple to compute.
218 218 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
219 219 newheads = list(c.node() for c in r)
220 # set some unsynced head to issue the "unsynced changes" warning
220 221 unsynced = inc and set([None]) or set()
221 222 return {None: (oldheads, newheads, unsynced)}
222 223
223 224 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False,
224 225 newbookmarks=[]):
225 226 """Check that a push won't add any outgoing head
226 227
227 228 Raise an Abort error and display a ui message as needed.
228 229 """
229 230 # Check for each named branch if we're creating new remote heads.
230 231 # To be a remote head after push, node must be either:
231 232 # - unknown locally
232 233 # - a local outgoing head descended from update
233 234 # - a remote head that's known locally and not
234 235 # ancestral to an outgoing head
235 236 if remoteheads == [nullid]:
236 237 # remote is empty, nothing to check.
237 238 return
238 239
239 240 if remote.capable('branchmap'):
240 241 headssum = _headssummary(repo, remote, outgoing)
241 242 else:
242 243 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
243 244 newbranches = [branch for branch, heads in headssum.iteritems()
244 245 if heads[0] is None]
245 246 # 1. Check for new branches on the remote.
246 247 if newbranches and not newbranch: # new branch requires --new-branch
247 248 branchnames = ', '.join(sorted(newbranches))
248 249 raise util.Abort(_("push creates new remote branches: %s!")
249 250 % branchnames,
250 251 hint=_("use 'hg push --new-branch' to create"
251 252 " new remote branches"))
252 253
253 254 # 2. Compute newly pushed bookmarks. We don't warn about bookmarked heads.
254 255 localbookmarks = repo._bookmarks
255 256 remotebookmarks = remote.listkeys('bookmarks')
256 257 bookmarkedheads = set()
257 258 for bm in localbookmarks:
258 259 rnode = remotebookmarks.get(bm)
259 260 if rnode and rnode in repo:
260 261 lctx, rctx = repo[bm], repo[rnode]
261 262 if bookmarks.validdest(repo, rctx, lctx):
262 263 bookmarkedheads.add(lctx.node())
263 264 else:
264 265 if bm in newbookmarks:
265 266 bookmarkedheads.add(repo[bm].node())
266 267
267 268 # 3. Check for new heads.
268 269 # If there are more heads after the push than before, a suitable
269 270 # error message, depending on unsynced status, is displayed.
270 271 error = None
271 272 allmissing = set(outgoing.missing)
272 273 allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
273 274 allfuturecommon.update(allmissing)
274 275 for branch, heads in sorted(headssum.iteritems()):
275 276 remoteheads, newheads, unsyncedheads = heads
276 277 candidate_newhs = set(newheads)
277 278 # add unsynced data
278 279 if remoteheads is None:
279 280 oldhs = set()
280 281 else:
281 282 oldhs = set(remoteheads)
282 283 oldhs.update(unsyncedheads)
283 284 candidate_newhs.update(unsyncedheads)
284 285 dhs = None # delta heads, the new heads on branch
285 286 discardedheads = set()
286 287 if repo.obsstore:
287 288 # remove future heads which are actually obsoleted by another
288 289 # pushed element:
289 290 #
290 291 # XXX as above, there are several cases this code does not handle
291 292 # XXX properly
292 293 #
293 294 # (1) if <nh> is public, it won't be affected by obsolete markers
294 295 # and a new head is created
295 296 #
296 297 # (2) if the new heads have ancestors which are not obsolete and
297 298 # not ancestors of any other heads, we will have a new head too.
298 299 #
299 300 # These two cases will be easy to handle for known changesets but
300 301 # much more tricky for unsynced changes.
301 302 newhs = set()
302 303 for nh in candidate_newhs:
303 304 if nh in repo and repo[nh].phase() <= phases.public:
304 305 newhs.add(nh)
305 306 else:
306 307 for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
307 308 if suc != nh and suc in allfuturecommon:
308 309 discardedheads.add(nh)
309 310 break
310 311 else:
311 312 newhs.add(nh)
312 313 else:
313 314 newhs = candidate_newhs
314 315 unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
315 316 if unsynced:
316 if len(unsynced) <= 4 or repo.ui.verbose:
317 if None in unsynced:
318 # old remote, no heads data
319 heads = None
320 elif len(unsynced) <= 4 or repo.ui.verbose:
317 321 heads = ' '.join(short(h) for h in unsynced)
318 322 else:
319 323 heads = (' '.join(short(h) for h in unsynced[:4]) +
320 324 ' ' + _("and %s others") % (len(unsynced) - 4))
321 if branch is None:
325 if heads is None:
326 repo.ui.status(_("remote has heads that are "
327 "not known locally\n"))
328 elif branch is None:
322 329 repo.ui.status(_("remote has heads that are "
323 330 "not known locally: %s\n") % heads)
324 331 else:
325 332 repo.ui.status(_("remote has heads on branch '%s' that are "
326 333 "not known locally: %s\n") % (branch, heads))
327 334 if remoteheads is None:
328 335 if len(newhs) > 1:
329 336 dhs = list(newhs)
330 337 if error is None:
331 338 error = (_("push creates new branch '%s' "
332 339 "with multiple heads") % (branch))
333 340 hint = _("merge or"
334 341 " see \"hg help push\" for details about"
335 342 " pushing new heads")
336 343 elif len(newhs) > len(oldhs):
337 344 # remove bookmarked or existing remote heads from the new heads list
338 345 dhs = sorted(newhs - bookmarkedheads - oldhs)
339 346 if dhs:
340 347 if error is None:
341 348 if branch not in ('default', None):
342 349 error = _("push creates new remote head %s "
343 350 "on branch '%s'!") % (short(dhs[0]), branch)
344 351 elif repo[dhs[0]].bookmarks():
345 352 error = _("push creates new remote head %s "
346 353 "with bookmark '%s'!") % (
347 354 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
348 355 else:
349 356 error = _("push creates new remote head %s!"
350 357 ) % short(dhs[0])
351 358 if unsyncedheads:
352 359 hint = _("pull and merge or"
353 360 " see \"hg help push\" for details about"
354 361 " pushing new heads")
355 362 else:
356 363 hint = _("merge or"
357 364 " see \"hg help push\" for details about"
358 365 " pushing new heads")
359 366 if branch is None:
360 367 repo.ui.note(_("new remote heads:\n"))
361 368 else:
362 369 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
363 370 for h in dhs:
364 371 repo.ui.note((" %s\n") % short(h))
365 372 if error:
366 373 raise util.Abort(error, hint=hint)
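For context on the data checkheads() consumes, both summary functions return a mapping from branch name to a (remoteheads, newheads, unsyncedheads) triple: _headssummary() produces one entry per named branch with real node ids, while _oldheadssummary() keys a single topological entry under None and, when incoming changes exist, uses set([None]) as the unsynced placeholder that the new guard tolerates. A small illustrative sketch of those shapes (the node values are made up):

# Illustrative only: the headssum shapes described in the docstrings above.
node_a = b'\xaa' * 20  # made-up 20-byte node ids
node_b = b'\xbb' * 20

# Remote with branchmap support (_headssummary): one entry per pushed branch.
headssum_new = {
    'default': ([node_a],          # remote heads known locally (None for a new branch)
                [node_a, node_b],  # heads once the outgoing changesets are pushed
                []),               # remote heads unknown locally
}

# Old remote (_oldheadssummary): a single topological entry keyed by None,
# with set([None]) standing in for unsynced heads when anything is incoming.
inc = True
headssum_old = {
    None: (set([node_a]),
           [node_a, node_b],
           inc and set([None]) or set()),
}

for branch, (remoteheads, newheads, unsynced) in headssum_old.items():
    print(branch, None in unsynced)  # "None True" -> the generic-warning path applies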