checkheads: use "revnum" in the "allfuturecommon" set...
marmoute
r32791:1cb14923 default
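
The patch below switches the contents of the "allfuturecommon" set from
20-byte node hashes to integer revision numbers, converting nodes at the
boundary with repo.changelog.rev (see the "-"/"+" lines in the diff). Here is
a minimal standalone sketch of the before/after membership pattern; the
FakeChangelog class and the sample nodes are illustrative stand-ins, not
Mercurial's API:

    # Revnum switch in miniature: store ints in the set and convert nodes
    # with torev() before every membership test.
    class FakeChangelog(object):
        def __init__(self, nodes):
            self._revs = {n: i for i, n in enumerate(nodes)}

        def rev(self, node):
            return self._revs[node]

    cl = FakeChangelog(['aaa', 'bbb', 'ccc'])
    torev = cl.rev

    futurecommon = {'aaa', 'bbb'}                  # before: node hashes
    assert 'aaa' in futurecommon

    futurecommon = {torev('aaa'), torev('bbb')}    # after: revnums {0, 1}
    assert torev('aaa') in futurecommon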
@@ -1,527 +1,528 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
29 29 def findcommonincoming(repo, remote, heads=None, force=False):
30 30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 31 subset of nodes between repo and remote.
32 32
33 33 "common" is a list of (at least) the heads of the common subset.
34 34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 35 locally. If remote does not support getbundle, this actually is a list of
36 36 roots of the nodes that would be incoming, to be supplied to
37 37 changegroupsubset. No code except for pull should be relying on this fact
38 38 any longer.
39 39 "heads" is either the supplied heads, or else the remote's heads.
40 40
41 41 If you pass heads and they are all known locally, the response lists just
42 42 these heads in "common" and in "heads".
43 43
44 44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 45 extensions a good hook into outgoing.
46 46 """
47 47
48 48 if not remote.capable('getbundle'):
49 49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50 50
51 51 if heads:
52 52 allknown = True
53 53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 54 for h in heads:
55 55 if not knownnode(h):
56 56 allknown = False
57 57 break
58 58 if allknown:
59 59 return (heads, False, heads)
60 60
61 61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 62 abortwhenunrelated=not force)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74 excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force)
145 145 og.commonheads, _any, _hds = commoninc
146 146
147 147 # compute outgoing
148 148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 149 if not mayexclude:
150 150 og.missingheads = onlyheads or repo.heads()
151 151 elif onlyheads is None:
152 152 # use visible heads as it should be cached
153 153 og.missingheads = repo.filtered("served").heads()
154 154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 155 else:
156 156 # compute common, missing and exclude secret stuff
157 157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 158 og._common, allmissing = sets
159 159 og._missing = missing = []
160 160 og.excluded = excluded = []
161 161 for node in allmissing:
162 162 ctx = repo[node]
163 163 if ctx.phase() >= phases.secret or ctx.extinct():
164 164 excluded.append(node)
165 165 else:
166 166 missing.append(node)
167 167 if len(missing) == len(allmissing):
168 168 missingheads = onlyheads
169 169 else: # update missing heads
170 170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 171 og.missingheads = missingheads
172 172 if portable:
173 173 # recompute common and missingheads as if -r<rev> had been given for
174 174 # each head of missing, and --base <rev> for each head of the proper
175 175 # ancestors of missing
176 176 og._computecommonmissing()
177 177 cl = repo.changelog
178 178 missingrevs = set(cl.rev(n) for n in og._missing)
179 179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 180 commonheads = set(og.commonheads)
181 181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 182
183 183 return og
184 184
185 185 def _headssummary(pushop):
186 186 """compute a summary of branch and heads status before and after push
187 187
188 188 return {'branch': ([remoteheads], [newheads],
189 189 [unsyncedheads], [discardedheads])} mapping
190 190
191 191 - branch: the branch name,
192 192 - remoteheads: the list of remote heads known locally
193 193 None if the branch is new,
194 194 - newheads: the new remote heads (known locally) with outgoing pushed,
195 195 - unsyncedheads: the list of remote heads unknown locally,
196 196 - discardedheads: the list of heads made obsolete by the push.
197 197 """
198 198 repo = pushop.repo.unfiltered()
199 199 remote = pushop.remote
200 200 outgoing = pushop.outgoing
201 201 cl = repo.changelog
202 202 headssum = {}
203 203 # A. Create set of branches involved in the push.
204 204 branches = set(repo[n].branch() for n in outgoing.missing)
205 205 remotemap = remote.branchmap()
206 206 newbranches = branches - set(remotemap)
207 207 branches.difference_update(newbranches)
208 208
209 209 # B. Register remote heads
210 210 remotebranches = set()
211 211 for branch, heads in remote.branchmap().iteritems():
212 212 remotebranches.add(branch)
213 213 known = []
214 214 unsynced = []
215 215 knownnode = cl.hasnode # do not use nodemap until it is filtered
216 216 for h in heads:
217 217 if knownnode(h):
218 218 known.append(h)
219 219 else:
220 220 unsynced.append(h)
221 221 headssum[branch] = (known, list(known), unsynced)
222 222 # C. Add new branch data
223 223 missingctx = list(repo[n] for n in outgoing.missing)
224 224 touchedbranches = set()
225 225 for ctx in missingctx:
226 226 branch = ctx.branch()
227 227 touchedbranches.add(branch)
228 228 if branch not in headssum:
229 229 headssum[branch] = (None, [], [])
230 230
231 231 # D. Drop data about untouched branches:
232 232 for branch in remotebranches - touchedbranches:
233 233 del headssum[branch]
234 234
235 235 # E. Update newmap with outgoing changes.
236 236 # This will possibly add new heads and remove existing ones.
237 237 newmap = branchmap.branchcache((branch, heads[1])
238 238 for branch, heads in headssum.iteritems()
239 239 if heads[0] is not None)
240 240 newmap.update(repo, (ctx.rev() for ctx in missingctx))
241 241 for branch, newheads in newmap.iteritems():
242 242 headssum[branch][1][:] = newheads
243 243 for branch, items in headssum.iteritems():
244 244 for l in items:
245 245 if l is not None:
246 246 l.sort()
247 247 headssum[branch] = items + ([],)
248 248
249 249 # If there is no obsstore, no post processing is needed.
250 250 if repo.obsstore:
251 251 allmissing = set(outgoing.missing)
252 252 cctx = repo.set('%ld', outgoing.common)
- 253 allfuturecommon = set(c.node() for c in cctx)
- 254 allfuturecommon.update(allmissing)
+ 253 allfuturecommon = set(c.rev() for c in cctx)
+ 254 torev = repo.changelog.rev
+ 255 allfuturecommon.update(torev(m) for m in allmissing)
255 256 for branch, heads in sorted(headssum.iteritems()):
256 257 remoteheads, newheads, unsyncedheads, placeholder = heads
257 258 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
258 259 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
259 260 sorted(result[1]))
260 261 return headssum
261 262
262 263 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
263 264 """Compute a branchmap summary for a repo without branchmap support"""
264 265
265 266 # 1-4b. old servers: Check for new topological heads.
266 267 # Construct {old,new}map with branch = None (topological branch).
267 268 # (code based on update)
268 269 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
269 270 oldheads = sorted(h for h in remoteheads if knownnode(h))
270 271 # all nodes in outgoing.missing are children of either:
271 272 # - an element of oldheads
272 273 # - another element of outgoing.missing
273 274 # - nullrev
274 275 # This explains why the new heads are very simple to compute.
275 276 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
276 277 newheads = sorted(c.node() for c in r)
277 278 # set some unsynced head to issue the "unsynced changes" warning
278 279 if inc:
279 280 unsynced = [None]
280 281 else:
281 282 unsynced = []
282 283 return {None: (oldheads, newheads, unsynced, [])}
283 284
284 285 def _nowarnheads(pushop):
285 286 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
286 287 repo = pushop.repo.unfiltered()
287 288 remote = pushop.remote
288 289 localbookmarks = repo._bookmarks
289 290 remotebookmarks = remote.listkeys('bookmarks')
290 291 bookmarkedheads = set()
291 292
292 293 # internal config: bookmarks.pushing
293 294 newbookmarks = [localbookmarks.expandname(b)
294 295 for b in pushop.ui.configlist('bookmarks', 'pushing')]
295 296
296 297 for bm in localbookmarks:
297 298 rnode = remotebookmarks.get(bm)
298 299 if rnode and rnode in repo:
299 300 lctx, rctx = repo[bm], repo[rnode]
300 301 if bookmarks.validdest(repo, rctx, lctx):
301 302 bookmarkedheads.add(lctx.node())
302 303 else:
303 304 if bm in newbookmarks and bm not in remotebookmarks:
304 305 bookmarkedheads.add(repo[bm].node())
305 306
306 307 return bookmarkedheads
307 308
308 309 def checkheads(pushop):
309 310 """Check that a push won't add any outgoing head
310 311
311 312 Raise an Abort error and display a ui message as needed.
312 313 """
313 314
314 315 repo = pushop.repo.unfiltered()
315 316 remote = pushop.remote
316 317 outgoing = pushop.outgoing
317 318 remoteheads = pushop.remoteheads
318 319 newbranch = pushop.newbranch
319 320 inc = bool(pushop.incoming)
320 321
321 322 # Check for each named branch if we're creating new remote heads.
322 323 # To be a remote head after push, node must be either:
323 324 # - unknown locally
324 325 # - a local outgoing head descended from update
325 326 # - a remote head that's known locally and not
326 327 # ancestral to an outgoing head
327 328 if remoteheads == [nullid]:
328 329 # remote is empty, nothing to check.
329 330 return
330 331
331 332 if remote.capable('branchmap'):
332 333 headssum = _headssummary(pushop)
333 334 else:
334 335 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
335 336 pushop.pushbranchmap = headssum
336 337 newbranches = [branch for branch, heads in headssum.iteritems()
337 338 if heads[0] is None]
338 339 # 1. Check for new branches on the remote.
339 340 if newbranches and not newbranch: # new branch requires --new-branch
340 341 branchnames = ', '.join(sorted(newbranches))
341 342 raise error.Abort(_("push creates new remote branches: %s!")
342 343 % branchnames,
343 344 hint=_("use 'hg push --new-branch' to create"
344 345 " new remote branches"))
345 346
346 347 # 2. Find heads that we need not warn about
347 348 nowarnheads = _nowarnheads(pushop)
348 349
349 350 # 3. Check for new heads.
350 351 # If there are more heads after the push than before, a suitable
351 352 # error message, depending on unsynced status, is displayed.
352 353 errormsg = None
353 354 for branch, heads in sorted(headssum.iteritems()):
354 355 remoteheads, newheads, unsyncedheads, discardedheads = heads
355 356 # add unsynced data
356 357 if remoteheads is None:
357 358 oldhs = set()
358 359 else:
359 360 oldhs = set(remoteheads)
360 361 oldhs.update(unsyncedheads)
361 362 dhs = None # delta heads, the new heads on branch
362 363 newhs = set(newheads)
363 364 newhs.update(unsyncedheads)
364 365 if unsyncedheads:
365 366 if None in unsyncedheads:
366 367 # old remote, no heads data
367 368 heads = None
368 369 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
369 370 heads = ' '.join(short(h) for h in unsyncedheads)
370 371 else:
371 372 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
372 373 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
373 374 if heads is None:
374 375 repo.ui.status(_("remote has heads that are "
375 376 "not known locally\n"))
376 377 elif branch is None:
377 378 repo.ui.status(_("remote has heads that are "
378 379 "not known locally: %s\n") % heads)
379 380 else:
380 381 repo.ui.status(_("remote has heads on branch '%s' that are "
381 382 "not known locally: %s\n") % (branch, heads))
382 383 if remoteheads is None:
383 384 if len(newhs) > 1:
384 385 dhs = list(newhs)
385 386 if errormsg is None:
386 387 errormsg = (_("push creates new branch '%s' "
387 388 "with multiple heads") % (branch))
388 389 hint = _("merge or"
389 390 " see 'hg help push' for details about"
390 391 " pushing new heads")
391 392 elif len(newhs) > len(oldhs):
392 393 # remove bookmarked or existing remote heads from the new heads list
393 394 dhs = sorted(newhs - nowarnheads - oldhs)
394 395 if dhs:
395 396 if errormsg is None:
396 397 if branch not in ('default', None):
397 398 errormsg = _("push creates new remote head %s "
398 399 "on branch '%s'!") % (short(dhs[0]), branch)
399 400 elif repo[dhs[0]].bookmarks():
400 401 errormsg = _("push creates new remote head %s "
401 402 "with bookmark '%s'!") % (
402 403 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
403 404 else:
404 405 errormsg = _("push creates new remote head %s!"
405 406 ) % short(dhs[0])
406 407 if unsyncedheads:
407 408 hint = _("pull and merge or"
408 409 " see 'hg help push' for details about"
409 410 " pushing new heads")
410 411 else:
411 412 hint = _("merge or"
412 413 " see 'hg help push' for details about"
413 414 " pushing new heads")
414 415 if branch is None:
415 416 repo.ui.note(_("new remote heads:\n"))
416 417 else:
417 418 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
418 419 for h in dhs:
419 420 repo.ui.note((" %s\n") % short(h))
420 421 if errormsg:
421 422 raise error.Abort(errormsg, hint=hint)
422 423
423 424 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
424 425 """post process the list of new heads with obsolescence information
425 426
426 427 Exists as a sub-function to contain the complexity and allow extensions to
427 428 experiment with smarter logic.
428 429
429 430 Returns (newheads, discarded_heads) tuple
430 431 """
431 432 # known issues
432 433 #
433 434 # * We "silently" skip processing on all changesets unknown locally
434 435 #
435 436 # * if <nh> is public on the remote, it won't be affected by obsolescence
436 437 # markers and a new head is created
437 438
438 439 # define various utilities and containers
439 440 repo = pushop.repo
440 441 unfi = repo.unfiltered()
441 442 tonode = unfi.changelog.node
442 443 torev = unfi.changelog.nodemap.get
443 444 public = phases.public
444 445 getphase = unfi._phasecache.phase
445 446 ispublic = (lambda r: getphase(unfi, r) == public)
- 446 ispushed = (lambda n: n in futurecommon)
+ 447 ispushed = (lambda n: torev(n) in futurecommon)
447 448 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
448 449 successorsmarkers = unfi.obsstore.successors
449 450 newhs = set() # final set of new heads
450 451 discarded = set() # new head of fully replaced branch
451 452
452 453 localcandidate = set() # candidate heads known locally
453 454 unknownheads = set() # candidate heads unknown locally
454 455 for h in candidate_newhs:
455 456 if h in unfi:
456 457 localcandidate.add(h)
457 458 else:
458 459 if successorsmarkers.get(h) is not None:
459 460 msg = ('checkheads: remote head unknown locally has'
460 461 ' local marker: %s\n')
461 462 repo.ui.debug(msg % hex(h))
462 463 unknownheads.add(h)
463 464
464 465 # fast path the simple case
465 466 if len(localcandidate) == 1:
466 467 return unknownheads | set(candidate_newhs), set()
467 468
468 469 # actually process branch replacement
469 470 while localcandidate:
470 471 nh = localcandidate.pop()
471 472 # run this check early to skip the evaluation of the whole branch
- 472 if (nh in futurecommon or ispublic(torev(nh))):
+ 473 if (torev(nh) in futurecommon or ispublic(torev(nh))):
473 474 newhs.add(nh)
474 475 continue
475 476
476 477 # Get all revs/nodes on the branch exclusive to this head
477 478 # (already filtered heads are "ignored")
478 479 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
479 480 nh, localcandidate, newhs)
480 481 branchnodes = [tonode(r) for r in branchrevs]
481 482
482 483 # The branch won't be hidden on the remote if
483 484 # * any part of it is public,
484 485 # * any part of it is considered part of the result by previous logic,
485 486 # * if we have no markers to push to obsolete it.
486 487 if (any(ispublic(r) for r in branchrevs)
- 487 or any(n in futurecommon for n in branchnodes)
+ 488 or any(torev(n) in futurecommon for n in branchnodes)
488 489 or any(not hasoutmarker(n) for n in branchnodes)):
489 490 newhs.add(nh)
490 491 else:
491 492 # note: there is a corner case if there is a merge in the branch.
492 493 # we might end up with -more- heads. However, these heads are not
493 494 # "added" by the push, but rather by the "removal" on the remote, so I
494 495 # think it is okay to ignore them.
495 496 discarded.add(nh)
496 497 newhs |= unknownheads
497 498 return newhs, discarded
498 499
499 500 def pushingmarkerfor(obsstore, ispushed, node):
500 501 """true if some markers are to be pushed for node
501 502
502 503 We cannot just look into the pushed obsmarkers from the pushop because
503 504 discovery might have filtered relevant markers. In addition, listing all
504 505 markers relevant to all changesets in the pushed set would be too expensive
505 506 (O(len(repo)))
506 507
507 508 (note: there are caching opportunities in this function, but it would
508 509 require a two-dimensional stack.)
509 510 """
510 511 successorsmarkers = obsstore.successors
511 512 stack = [node]
512 513 seen = set(stack)
513 514 while stack:
514 515 current = stack.pop()
515 516 if ispushed(current):
516 517 return True
517 518 markers = successorsmarkers.get(current, ())
518 519 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
519 520 for m in markers:
520 521 nexts = m[1] # successors
521 522 if not nexts: # this is a prune marker
522 523 nexts = m[5] or () # parents
523 524 for n in nexts:
524 525 if n not in seen:
525 526 seen.add(n)
526 527 stack.append(n)
527 528 return False
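
For readers following the marker walk in pushingmarkerfor() above, here is a
self-contained toy reproduction of its depth-first traversal; the string
nodes and marker tuples below are simplified stand-ins for the binary
obsstore data:

    # Toy version of the traversal in pushingmarkerfor(). successors maps a
    # node to its marker tuples; index 1 holds the successor nodes, index 5
    # the parents (consulted for prune markers, which have no successors).
    def markerwalk(successors, ispushed, node):
        stack = [node]
        seen = set(stack)
        while stack:
            current = stack.pop()
            if ispushed(current):
                return True
            for m in successors.get(current, ()):
                nexts = m[1] or m[5] or ()  # successors, else parents
                for n in nexts:
                    if n not in seen:
                        seen.add(n)
                        stack.append(n)
        return False

    successors = {
        'a': [('a', ('b',), 0, (), 0.0, None)],  # 'a' rewritten into 'b'
        'b': [('b', (), 0, (), 0.0, ('p',))],    # 'b' pruned; parent 'p'
    }
    assert markerwalk(successors, lambda n: n == 'p', 'a')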