index: use `index.get_rev` in `discovery._postprocessobsolete`...
marmoute
r43960:65d67702 default
@@ -1,593 +1,593 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 pycompat,
25 25 scmutil,
26 26 setdiscovery,
27 27 treediscovery,
28 28 util,
29 29 )
30 30
31 31
32 32 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
33 33 """Return a tuple (common, anyincoming, heads) used to identify the common
34 34 subset of nodes between repo and remote.
35 35
36 36 "common" is a list of (at least) the heads of the common subset.
37 37 "anyincoming" is testable as a boolean indicating if any nodes are missing
38 38 locally. If remote does not support getbundle, this actually is a list of
39 39 roots of the nodes that would be incoming, to be supplied to
40 40 changegroupsubset. No code except for pull should be relying on this fact
41 41 any longer.
42 42 "heads" is either the supplied heads, or else the remote's heads.
43 43 "ancestorsof" if not None, restrict the discovery to a subset defined by
44 44 these nodes. Changeset outside of this set won't be considered (and
45 45 won't appears in "common")
46 46
47 47 If you pass heads and they are all known locally, the response lists just
48 48 these heads in "common" and in "heads".
49 49
50 50 Please use findcommonoutgoing to compute the set of outgoing nodes to give
51 51 extensions a good hook into outgoing.
52 52 """
53 53
54 54 if not remote.capable(b'getbundle'):
55 55 return treediscovery.findcommonincoming(repo, remote, heads, force)
56 56
57 57 if heads:
58 58 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
59 59 if all(knownnode(h) for h in heads):
60 60 return (heads, False, heads)
61 61
62 62 res = setdiscovery.findcommonheads(
63 63 repo.ui,
64 64 repo,
65 65 remote,
66 66 abortwhenunrelated=not force,
67 67 ancestorsof=ancestorsof,
68 68 )
69 69 common, anyinc, srvheads = res
70 70 return (list(common), anyinc, heads or list(srvheads))
71 71
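# Editor's sketch (not in the original file; helper names are hypothetical):
# the triple returned above is typically unpacked like this.
#
#     common, anyinc, heads = findcommonincoming(repo, remote)
#     if anyinc:
#         # some nodes are missing locally; `common` bounds what both sides
#         # already share, `heads` is what the remote considers its heads
#         fetch(remote, common=common, heads=heads)  # hypothetical helper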
72 72
73 73 class outgoing(object):
74 74 '''Represents the set of nodes present in a local repo but not in a
75 75 (possibly) remote one.
76 76
77 77 Members:
78 78
79 79 missing is a list of all nodes present in local but not in remote.
80 80 common is a list of all nodes shared between the two repos.
81 81 excluded is the list of missing changesets that shouldn't be sent remotely.
82 82 missingheads is the list of heads of missing.
83 83 commonheads is the list of heads of common.
84 84
85 85 The sets are computed on demand from the heads, unless provided upfront
86 86 by discovery.'''
87 87
88 88 def __init__(
89 89 self, repo, commonheads=None, missingheads=None, missingroots=None
90 90 ):
91 91 # at least one of them must not be set
92 92 assert None in (commonheads, missingroots)
93 93 cl = repo.changelog
94 94 if missingheads is None:
95 95 missingheads = cl.heads()
96 96 if missingroots:
97 97 discbases = []
98 98 for n in missingroots:
99 99 discbases.extend([p for p in cl.parents(n) if p != nullid])
100 100 # TODO remove call to nodesbetween.
101 101 # TODO populate attributes on outgoing instance instead of setting
102 102 # discbases.
103 103 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
104 104 included = set(csets)
105 105 missingheads = heads
106 106 commonheads = [n for n in discbases if n not in included]
107 107 elif not commonheads:
108 108 commonheads = [nullid]
109 109 self.commonheads = commonheads
110 110 self.missingheads = missingheads
111 111 self._revlog = cl
112 112 self._common = None
113 113 self._missing = None
114 114 self.excluded = []
115 115
116 116 def _computecommonmissing(self):
117 117 sets = self._revlog.findcommonmissing(
118 118 self.commonheads, self.missingheads
119 119 )
120 120 self._common, self._missing = sets
121 121
122 122 @util.propertycache
123 123 def common(self):
124 124 if self._common is None:
125 125 self._computecommonmissing()
126 126 return self._common
127 127
128 128 @util.propertycache
129 129 def missing(self):
130 130 if self._missing is None:
131 131 self._computecommonmissing()
132 132 return self._missing
133 133
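# Editor's sketch (assumes only what the class above shows): `common` and
# `missing` are computed lazily; util.propertycache memoizes the first access
# by storing the result as a plain instance attribute.
#
#     og = outgoing(repo, commonheads=None, missingheads=None)
#     nodes = og.missing   # first access runs findcommonmissing()
#     nodes = og.missing   # second access is a cached attribute read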
134 134
135 135 def findcommonoutgoing(
136 136 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
137 137 ):
138 138 '''Return an outgoing instance to identify the nodes present in repo but
139 139 not in other.
140 140
141 141 If onlyheads is given, only nodes ancestral to nodes in onlyheads
142 142 (inclusive) are included. If you already know the local repo's heads,
143 143 passing them in onlyheads is faster than letting them be recomputed here.
144 144
145 145 If commoninc is given, it must be the result of a prior call to
146 146 findcommonincoming(repo, other, force) to avoid recomputing it here.
147 147
148 148 If portable is given, compute more conservative common and missingheads,
149 149 to make bundles created from the instance more portable.'''
150 150 # declare an empty outgoing object to be filled later
151 151 og = outgoing(repo, None, None)
152 152
153 153 # get common set if not provided
154 154 if commoninc is None:
155 155 commoninc = findcommonincoming(
156 156 repo, other, force=force, ancestorsof=onlyheads
157 157 )
158 158 og.commonheads, _any, _hds = commoninc
159 159
160 160 # compute outgoing
161 161 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
162 162 if not mayexclude:
163 163 og.missingheads = onlyheads or repo.heads()
164 164 elif onlyheads is None:
165 165 # use visible heads as it should be cached
166 166 og.missingheads = repo.filtered(b"served").heads()
167 167 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
168 168 else:
169 169 # compute common, missing and exclude secret stuff
170 170 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
171 171 og._common, allmissing = sets
172 172 og._missing = missing = []
173 173 og.excluded = excluded = []
174 174 for node in allmissing:
175 175 ctx = repo[node]
176 176 if ctx.phase() >= phases.secret or ctx.extinct():
177 177 excluded.append(node)
178 178 else:
179 179 missing.append(node)
180 180 if len(missing) == len(allmissing):
181 181 missingheads = onlyheads
182 182 else: # update missing heads
183 183 missingheads = phases.newheads(repo, onlyheads, excluded)
184 184 og.missingheads = missingheads
185 185 if portable:
186 186 # recompute common and missingheads as if -r<rev> had been given for
187 187 # each head of missing, and --base <rev> for each head of the proper
188 188 # ancestors of missing
189 189 og._computecommonmissing()
190 190 cl = repo.changelog
191 191 missingrevs = set(cl.rev(n) for n in og._missing)
192 192 og._common = set(cl.ancestors(missingrevs)) - missingrevs
193 193 commonheads = set(og.commonheads)
194 194 og.missingheads = [h for h in og.missingheads if h not in commonheads]
195 195
196 196 return og
197 197
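# Hedged usage sketch (not in the original file): reusing a prior discovery
# result, as the docstring suggests, avoids recomputing the common set.
#
#     commoninc = findcommonincoming(repo, other, force=False)
#     og = findcommonoutgoing(repo, other, commoninc=commoninc)
#     nodes_to_push = og.missing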
198 198
199 199 def _headssummary(pushop):
200 200 """compute a summary of branch and heads status before and after push
201 201
202 202 return {'branch': ([remoteheads], [newheads],
203 203 [unsyncedheads], [discardedheads])} mapping
204 204
205 205 - branch: the branch name,
206 206 - remoteheads: the list of remote heads known locally
207 207 None if the branch is new,
208 208 - newheads: the new remote heads (known locally) with outgoing pushed,
209 209 - unsyncedheads: the list of remote heads unknown locally,
210 210 - discardedheads: the list of heads made obsolete by the push.
211 211 """
212 212 repo = pushop.repo.unfiltered()
213 213 remote = pushop.remote
214 214 outgoing = pushop.outgoing
215 215 cl = repo.changelog
216 216 headssum = {}
217 217 missingctx = set()
218 218 # A. Create set of branches involved in the push.
219 219 branches = set()
220 220 for n in outgoing.missing:
221 221 ctx = repo[n]
222 222 missingctx.add(ctx)
223 223 branches.add(ctx.branch())
224 224
225 225 with remote.commandexecutor() as e:
226 226 remotemap = e.callcommand(b'branchmap', {}).result()
227 227
228 228 knownnode = cl.hasnode # do not use nodemap until it is filtered
229 229 # A. register remote heads of branches which are in outgoing set
230 230 for branch, heads in pycompat.iteritems(remotemap):
231 231 # don't add head info about branches which we don't have locally
232 232 if branch not in branches:
233 233 continue
234 234 known = []
235 235 unsynced = []
236 236 for h in heads:
237 237 if knownnode(h):
238 238 known.append(h)
239 239 else:
240 240 unsynced.append(h)
241 241 headssum[branch] = (known, list(known), unsynced)
242 242
243 243 # B. add new branch data
244 244 for branch in branches:
245 245 if branch not in headssum:
246 246 headssum[branch] = (None, [], [])
247 247
248 248 # C. Update newmap with outgoing changes.
249 249 # This will possibly add new heads and remove existing ones.
250 250 newmap = branchmap.remotebranchcache(
251 251 (branch, heads[1])
252 252 for branch, heads in pycompat.iteritems(headssum)
253 253 if heads[0] is not None
254 254 )
255 255 newmap.update(repo, (ctx.rev() for ctx in missingctx))
256 256 for branch, newheads in pycompat.iteritems(newmap):
257 257 headssum[branch][1][:] = newheads
258 258 for branch, items in pycompat.iteritems(headssum):
259 259 for l in items:
260 260 if l is not None:
261 261 l.sort()
262 262 headssum[branch] = items + ([],)
263 263
264 264 # If there is no obsstore, no post processing is needed.
265 265 if repo.obsstore:
266 266 torev = repo.changelog.rev
267 267 futureheads = set(torev(h) for h in outgoing.missingheads)
268 268 futureheads |= set(torev(h) for h in outgoing.commonheads)
269 269 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
270 270 for branch, heads in sorted(pycompat.iteritems(headssum)):
271 271 remoteheads, newheads, unsyncedheads, placeholder = heads
272 272 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
273 273 headssum[branch] = (
274 274 remoteheads,
275 275 sorted(result[0]),
276 276 unsyncedheads,
277 277 sorted(result[1]),
278 278 )
279 279 return headssum
280 280
281 281
282 282 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
283 283 """Compute branchmapsummary for repo without branchmap support"""
284 284
285 285 # 1-4b. old servers: Check for new topological heads.
286 286 # Construct {old,new}map with branch = None (topological branch).
287 287 # (code based on update)
288 288 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
289 289 oldheads = sorted(h for h in remoteheads if knownnode(h))
290 290 # all nodes in outgoing.missing are children of either:
291 291 # - an element of oldheads
292 292 # - another element of outgoing.missing
293 293 # - nullrev
294 294 # This explains why the new heads are very simple to compute.
295 295 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
296 296 newheads = sorted(c.node() for c in r)
297 297 # set some unsynced head to issue the "unsynced changes" warning
298 298 if inc:
299 299 unsynced = [None]
300 300 else:
301 301 unsynced = []
302 302 return {None: (oldheads, newheads, unsynced, [])}
303 303
304 304
305 305 def _nowarnheads(pushop):
306 306 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
307 307 repo = pushop.repo.unfiltered()
308 308 remote = pushop.remote
309 309 localbookmarks = repo._bookmarks
310 310
311 311 with remote.commandexecutor() as e:
312 312 remotebookmarks = e.callcommand(
313 313 b'listkeys', {b'namespace': b'bookmarks',}
314 314 ).result()
315 315
316 316 bookmarkedheads = set()
317 317
318 318 # internal config: bookmarks.pushing
319 319 newbookmarks = [
320 320 localbookmarks.expandname(b)
321 321 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
322 322 ]
323 323
324 324 for bm in localbookmarks:
325 325 rnode = remotebookmarks.get(bm)
326 326 if rnode and rnode in repo:
327 327 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
328 328 if bookmarks.validdest(repo, rctx, lctx):
329 329 bookmarkedheads.add(lctx.node())
330 330 else:
331 331 if bm in newbookmarks and bm not in remotebookmarks:
332 332 bookmarkedheads.add(localbookmarks[bm])
333 333
334 334 return bookmarkedheads
335 335
336 336
337 337 def checkheads(pushop):
338 338 """Check that a push won't add any outgoing head
339 339
340 340 Raises an Abort error and displays a ui message as needed.
341 341 """
342 342
343 343 repo = pushop.repo.unfiltered()
344 344 remote = pushop.remote
345 345 outgoing = pushop.outgoing
346 346 remoteheads = pushop.remoteheads
347 347 newbranch = pushop.newbranch
348 348 inc = bool(pushop.incoming)
349 349
350 350 # Check for each named branch if we're creating new remote heads.
351 351 # To be a remote head after push, node must be either:
352 352 # - unknown locally
353 353 # - a local outgoing head descended from update
354 354 # - a remote head that's known locally and not
355 355 # ancestral to an outgoing head
356 356 if remoteheads == [nullid]:
357 357 # remote is empty, nothing to check.
358 358 return
359 359
360 360 if remote.capable(b'branchmap'):
361 361 headssum = _headssummary(pushop)
362 362 else:
363 363 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
364 364 pushop.pushbranchmap = headssum
365 365 newbranches = [
366 366 branch
367 367 for branch, heads in pycompat.iteritems(headssum)
368 368 if heads[0] is None
369 369 ]
370 370 # 1. Check for new branches on the remote.
371 371 if newbranches and not newbranch: # new branch requires --new-branch
372 372 branchnames = b', '.join(sorted(newbranches))
373 373 # Calculate how many of the new branches are closed branches
374 374 closedbranches = set()
375 375 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
376 376 if isclosed:
377 377 closedbranches.add(tag)
378 378 closedbranches = closedbranches & set(newbranches)
379 379 if closedbranches:
380 380 errmsg = _(b"push creates new remote branches: %s (%d closed)!") % (
381 381 branchnames,
382 382 len(closedbranches),
383 383 )
384 384 else:
385 385 errmsg = _(b"push creates new remote branches: %s!") % branchnames
386 386 hint = _(b"use 'hg push --new-branch' to create new remote branches")
387 387 raise error.Abort(errmsg, hint=hint)
388 388
389 389 # 2. Find heads that we need not warn about
390 390 nowarnheads = _nowarnheads(pushop)
391 391
392 392 # 3. Check for new heads.
393 393 # If there are more heads after the push than before, a suitable
394 394 # error message, depending on unsynced status, is displayed.
395 395 errormsg = None
396 396 for branch, heads in sorted(pycompat.iteritems(headssum)):
397 397 remoteheads, newheads, unsyncedheads, discardedheads = heads
398 398 # add unsynced data
399 399 if remoteheads is None:
400 400 oldhs = set()
401 401 else:
402 402 oldhs = set(remoteheads)
403 403 oldhs.update(unsyncedheads)
404 404 dhs = None # delta heads, the new heads on branch
405 405 newhs = set(newheads)
406 406 newhs.update(unsyncedheads)
407 407 if unsyncedheads:
408 408 if None in unsyncedheads:
409 409 # old remote, no heads data
410 410 heads = None
411 411 else:
412 412 heads = scmutil.nodesummaries(repo, unsyncedheads)
413 413 if heads is None:
414 414 repo.ui.status(
415 415 _(b"remote has heads that are not known locally\n")
416 416 )
417 417 elif branch is None:
418 418 repo.ui.status(
419 419 _(b"remote has heads that are not known locally: %s\n")
420 420 % heads
421 421 )
422 422 else:
423 423 repo.ui.status(
424 424 _(
425 425 b"remote has heads on branch '%s' that are "
426 426 b"not known locally: %s\n"
427 427 )
428 428 % (branch, heads)
429 429 )
430 430 if remoteheads is None:
431 431 if len(newhs) > 1:
432 432 dhs = list(newhs)
433 433 if errormsg is None:
434 434 errormsg = (
435 435 _(b"push creates new branch '%s' with multiple heads")
436 436 % branch
437 437 )
438 438 hint = _(
439 439 b"merge or"
440 440 b" see 'hg help push' for details about"
441 441 b" pushing new heads"
442 442 )
443 443 elif len(newhs) > len(oldhs):
444 444 # remove bookmarked or existing remote heads from the new heads list
445 445 dhs = sorted(newhs - nowarnheads - oldhs)
446 446 if dhs:
447 447 if errormsg is None:
448 448 if branch not in (b'default', None):
449 449 errormsg = _(
450 450 b"push creates new remote head %s on branch '%s'!"
451 451 ) % (short(dhs[0]), branch)
452 452 elif repo[dhs[0]].bookmarks():
453 453 errormsg = _(
454 454 b"push creates new remote head %s "
455 455 b"with bookmark '%s'!"
456 456 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
457 457 else:
458 458 errormsg = _(b"push creates new remote head %s!") % short(
459 459 dhs[0]
460 460 )
461 461 if unsyncedheads:
462 462 hint = _(
463 463 b"pull and merge or"
464 464 b" see 'hg help push' for details about"
465 465 b" pushing new heads"
466 466 )
467 467 else:
468 468 hint = _(
469 469 b"merge or"
470 470 b" see 'hg help push' for details about"
471 471 b" pushing new heads"
472 472 )
473 473 if branch is None:
474 474 repo.ui.note(_(b"new remote heads:\n"))
475 475 else:
476 476 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
477 477 for h in dhs:
478 478 repo.ui.note(b" %s\n" % short(h))
479 479 if errormsg:
480 480 raise error.Abort(errormsg, hint=hint)
481 481
482 482
483 483 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
484 484 """post process the list of new heads with obsolescence information
485 485
486 486 Exists as a sub-function to contain the complexity and allow extensions to
487 487 experiment with smarter logic.
488 488
489 489 Returns (newheads, discarded_heads) tuple
490 490 """
491 491 # known issues
492 492 #
493 493 # * We "silently" skip processing on all changesets unknown locally
494 494 #
495 495 # * if <nh> is public on the remote, it won't be affected by obsolescence
496 496 # markers and a new head is created
497 497
498 498 # define various utilities and containers
499 499 repo = pushop.repo
500 500 unfi = repo.unfiltered()
501 501 tonode = unfi.changelog.node
502     - torev = unfi.changelog.nodemap.get
    502 + torev = unfi.changelog.index.get_rev
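# Editor's note on the change above (a sketch, not authoritative): both
# spellings map a node to a revision number and yield None for unknown
# nodes; the commit switches to the explicit index API.
#
#     rev = unfi.changelog.nodemap.get(node)    # old dict-style lookup
#     rev = unfi.changelog.index.get_rev(node)  # new API, same contract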
503 503 public = phases.public
504 504 getphase = unfi._phasecache.phase
505 505 ispublic = lambda r: getphase(unfi, r) == public
506 506 ispushed = lambda n: torev(n) in futurecommon
507 507 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
508 508 successorsmarkers = unfi.obsstore.successors
509 509 newhs = set() # final set of new heads
510 510 discarded = set() # new head of fully replaced branch
511 511
512 512 localcandidate = set() # candidate heads known locally
513 513 unknownheads = set() # candidate heads unknown locally
514 514 for h in candidate_newhs:
515 515 if h in unfi:
516 516 localcandidate.add(h)
517 517 else:
518 518 if successorsmarkers.get(h) is not None:
519 519 msg = (
520 520 b'checkheads: remote head unknown locally has'
521 521 b' local marker: %s\n'
522 522 )
523 523 repo.ui.debug(msg % hex(h))
524 524 unknownheads.add(h)
525 525
526 526 # fast path the simple case
527 527 if len(localcandidate) == 1:
528 528 return unknownheads | set(candidate_newhs), set()
529 529
530 530 # actually process branch replacement
531 531 while localcandidate:
532 532 nh = localcandidate.pop()
533 533 # run this check early to skip the evaluation of the whole branch
534 534 if torev(nh) in futurecommon or ispublic(torev(nh)):
535 535 newhs.add(nh)
536 536 continue
537 537
538 538 # Get all revs/nodes on the branch exclusive to this head
539 539 # (already filtered heads are "ignored")
540 540 branchrevs = unfi.revs(
541 541 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
542 542 )
543 543 branchnodes = [tonode(r) for r in branchrevs]
544 544
545 545 # The branch won't be hidden on the remote if
546 546 # * any part of it is public,
547 547 # * any part of it is considered part of the result by previous logic,
548 548 # * we have no markers to push to obsolete it.
549 549 if (
550 550 any(ispublic(r) for r in branchrevs)
551 551 or any(torev(n) in futurecommon for n in branchnodes)
552 552 or any(not hasoutmarker(n) for n in branchnodes)
553 553 ):
554 554 newhs.add(nh)
555 555 else:
556 556 # note: there is a corner case if there is a merge in the branch.
557 557 # we might end up with -more- heads. However, these heads are not
558 558 # "added" by the push, but rather by the "removal" on the remote, so I
559 559 # think it is okay to ignore them.
560 560 discarded.add(nh)
561 561 newhs |= unknownheads
562 562 return newhs, discarded
563 563
564 564
565 565 def pushingmarkerfor(obsstore, ispushed, node):
566 566 """true if some markers are to be pushed for node
567 567
568 568 We cannot just look into the pushed obsmarkers from the pushop because
569 569 discovery might have filtered relevant markers. In addition, listing all
570 570 markers relevant to all changesets in the pushed set would be too expensive
571 571 (O(len(repo)))
572 572
573 573 (note: there are caching opportunities in this function, but it would
574 574 require a two-dimensional stack.)
575 575 """
576 576 successorsmarkers = obsstore.successors
577 577 stack = [node]
578 578 seen = set(stack)
579 579 while stack:
580 580 current = stack.pop()
581 581 if ispushed(current):
582 582 return True
583 583 markers = successorsmarkers.get(current, ())
584 584 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
585 585 for m in markers:
586 586 nexts = m[1] # successors
587 587 if not nexts: # this is a prune marker
588 588 nexts = m[5] or () # parents
589 589 for n in nexts:
590 590 if n not in seen:
591 591 seen.add(n)
592 592 stack.append(n)
593 593 return False
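# Usage sketch, mirroring the call site in _postprocessobsolete above:
# the obsstore and an `ispushed` predicate are curried in first, then the
# resulting callable is asked about individual candidate nodes.
#
#     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
#     if hasoutmarker(node):
#         ...  # some marker chain starting at `node` reaches the pushed set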