##// END OF EJS Templates
discovery: remove deprecated API...
Raphaël Gomès -
r49361:61fe7e17 default
parent child Browse files
Show More
@@ -1,628 +1,617
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 short,
16 16 )
17 17
18 18 from . import (
19 19 bookmarks,
20 20 branchmap,
21 21 error,
22 22 phases,
23 23 pycompat,
24 24 scmutil,
25 25 setdiscovery,
26 26 treediscovery,
27 27 util,
28 28 )
29 29
30 30
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof" if not None, restrict the discovery to a subset defined by
    these nodes. Changeset outside of this set won't be considered (but may
    still appear in "common").

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    # old servers without getbundle support use the tree-walking protocol
    if not remote.capable(b'getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        haslocally = repo.changelog.hasnode  # no nodemap until it is filtered
        if all(haslocally(h) for h in heads):
            # every requested head is already known locally: nothing incoming
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui,
        repo,
        remote,
        abortwhenunrelated=not force,
        ancestorsof=ancestorsof,
    )
    if heads and not anyinc:
        # server could be lying on the advertised heads
        haslocally = repo.changelog.hasnode
        anyinc = any(not haslocally(n) for n in heads)
    return (list(common), anyinc, heads or list(srvheads))
75 75
class outgoing(object):
    """Represents the result of a findcommonoutgoing() call.

    Members:

    ancestorsof is a list of the nodes whose ancestors are included in the
    outgoing operation.

    missing is a list of those ancestors of ancestorsof that are present in
    local but not in remote.

    common is a set containing revs common between the local and the remote
    repository (at least all of those that are ancestors of ancestorsof).

    commonheads is the list of heads of common.

    excluded is the list of missing changeset that shouldn't be sent
    remotely.

    Some members are computed on demand from the heads, unless provided upfront
    by discovery."""

    def __init__(
        self, repo, commonheads=None, ancestorsof=None, missingroots=None
    ):
        # at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if ancestorsof is None:
            # default to every head of the repository
            ancestorsof = cl.heads()
        if missingroots:
            # derive commonheads from the roots of the missing set: the
            # non-null parents of the roots are (by construction) common
            discbases = []
            for n in missingroots:
                discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
            included = set(csets)
            ancestorsof = heads
            # keep only those bases that are not themselves being sent
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            # nothing known common: everything from the null revision up
            commonheads = [repo.nullid]
        self.commonheads = commonheads
        self.ancestorsof = ancestorsof
        self._revlog = cl
        # _common/_missing are lazily filled by _computecommonmissing()
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # delegate the common/missing split to the changelog
        sets = self._revlog.findcommonmissing(
            self.commonheads, self.ancestorsof
        )
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        # computed (then cached) on first access
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        # computed (then cached) on first access
        if self._missing is None:
            self._computecommonmissing()
        return self._missing

    @property
    def missingheads(self):
        # deprecated alias of ancestorsof (deprecated in 5.5); kept only
        # for backward compatibility with out-of-tree callers
        util.nouideprecwarn(
            b'outgoing.missingheads never contained what the name suggests and '
            b'was renamed to outgoing.ancestorsof. check your code for '
            b'correctness.',
            b'5.5',
            stacklevel=2,
        )
        return self.ancestorsof
155 144
def findcommonoutgoing(
    repo, other, onlyheads=None, force=False, commoninc=None, portable=False
):
    """Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and ancestorsof,
    to make bundles created from the instance more portable."""
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(
            repo, other, force=force, ancestorsof=onlyheads
        )
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    # exclusion is only possible when secret changesets or obsolescence
    # markers exist; otherwise everything outgoing may be sent
    mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
    if not mayexclude:
        og.ancestorsof = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.ancestorsof = repo.filtered(b"served").heads()
        og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            # secret or extinct changesets must not leave the repository
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # nothing was excluded: the requested heads stand as-is
            ancestorsof = onlyheads
        else:  # update missing heads
            ancestorsof = phases.newheads(repo, onlyheads, excluded)
        og.ancestorsof = ancestorsof
    if portable:
        # recompute common and ancestorsof as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = {cl.rev(n) for n in og._missing}
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]

    return og
219 208
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Create set of branches involved in the push.
    branches = set()
    for n in outgoing.missing:
        ctx = repo[n]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    # fetch the remote's view of its branch heads
    with remote.commandexecutor() as e:
        remotemap = e.callcommand(b'branchmap', {}).result()

    knownnode = cl.hasnode  # do not use nodemap until it is filtered
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in pycompat.iteritems(remotemap):
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        known = []
        unsynced = []
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        # (remoteheads, newheads-to-be-updated, unsyncedheads)
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            # remoteheads is None: the branch does not exist remotely yet
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache(
        repo,
        (
            (branch, heads[1])
            for branch, heads in pycompat.iteritems(headssum)
            if heads[0] is not None
        ),
    )
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in pycompat.iteritems(newmap):
        # mutate the newheads list in place so headssum stays consistent
        headssum[branch][1][:] = newheads
    for branch, items in pycompat.iteritems(headssum):
        for l in items:
            if l is not None:
                l.sort()
        # append the (initially empty) discardedheads slot
        headssum[branch] = items + ([],)

    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = {torev(h) for h in outgoing.ancestorsof}
        futureheads |= {torev(h) for h in outgoing.commonheads}
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(pycompat.iteritems(headssum)):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            # split newheads into surviving heads and obsoleted (discarded)
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (
                remoteheads,
                sorted(result[0]),
                unsyncedheads,
                sorted(result[1]),
            )
    return headssum
305 294
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    haslocally = repo.changelog.hasnode  # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if haslocally(h))
    # every node in outgoing.missing is a child of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # which makes the post-push heads straightforward to compute
    headctxs = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(ctx.node() for ctx in headctxs)
    # a [None] placeholder makes the caller issue the "unsynced changes"
    # warning when there are incoming changes
    unsynced = [None] if inc else []
    return {None: (oldheads, newheads, unsynced, [])}
327 316
328 317
def _nowarnheads(pushop):
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks

    # ask the remote for its bookmark name -> node mapping
    with remote.commandexecutor() as e:
        remotebookmarks = e.callcommand(
            b'listkeys',
            {
                b'namespace': b'bookmarks',
            },
        ).result()

    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [
        localbookmarks.expandname(b)
        for b in pushop.ui.configlist(b'bookmarks', b'pushing')
    ]

    for name in localbookmarks:
        remotenode = remotebookmarks.get(name)
        if remotenode and remotenode in repo:
            # bookmark exists on both sides: only silence the head when the
            # move is a valid bookmark destination (fast-forward/divergence)
            localctx = repo[localbookmarks[name]]
            remotectx = repo[remotenode]
            if bookmarks.validdest(repo, remotectx, localctx):
                bookmarkedheads.add(localctx.node())
        elif name in newbookmarks and name not in remotebookmarks:
            # a bookmark being pushed for the first time also silences its head
            bookmarkedheads.add(localbookmarks[name])

    return bookmarkedheads
362 351
363 352
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise StateError error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [repo.nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable(b'branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    # branches with remoteheads == None do not exist on the remote yet
    newbranches = [
        branch
        for branch, heads in pycompat.iteritems(headssum)
        if heads[0] is None
    ]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = b', '.join(sorted(newbranches))
        # Calculate how many of the new branches are closed branches
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = closedbranches & set(newbranches)
        if closedbranches:
            errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
                branchnames,
                len(closedbranches),
            )
        else:
            errmsg = _(b"push creates new remote branches: %s") % branchnames
        hint = _(b"use 'hg push --new-branch' to create new remote branches")
        raise error.StateError(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(pycompat.iteritems(headssum)):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally\n")
                )
            elif branch is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally: %s\n")
                    % heads
                )
            else:
                repo.ui.status(
                    _(
                        b"remote has heads on branch '%s' that are "
                        b"not known locally: %s\n"
                    )
                    % (branch, heads)
                )
        if remoteheads is None:
            # new branch: forbid pushing it with more than one head
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (
                        _(b"push creates new branch '%s' with multiple heads")
                        % branch
                    )
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            # only the first offending branch sets the error message; later
            # branches still get their heads listed via ui.note below
            if errormsg is None:
                if branch not in (b'default', None):
                    errormsg = _(
                        b"push creates new remote head %s on branch '%s'"
                    ) % (
                        short(dhs[0]),
                        branch,
                    )
                elif repo[dhs[0]].bookmarks():
                    errormsg = _(
                        b"push creates new remote head %s "
                        b"with bookmark '%s'"
                    ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _(b"push creates new remote head %s") % short(
                        dhs[0]
                    )
                if unsyncedheads:
                    hint = _(
                        b"pull and merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
                else:
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
            if branch is None:
                repo.ui.note(_(b"new remote heads:\n"))
            else:
                repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note(b" %s\n" % short(h))
    if errormsg:
        raise error.StateError(errormsg, hint=hint)
511 500
512 501
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    torev = unfi.changelog.index.get_rev
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = lambda r: getphase(unfi, r) == public
    ispushed = lambda n: torev(n) in futurecommon
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set()  # final set of new heads
    discarded = set()  # new head of fully replaced branch

    localcandidate = set()  # candidate heads known locally
    unknownheads = set()  # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                msg = (
                    b'checkheads: remote head unknown locally has'
                    b' local marker: %s\n'
                )
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        current_branch = unfi[nh].branch()
        # run this check early to skip the evaluation of the whole branch
        if torev(nh) in futurecommon or ispublic(torev(nh)):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs(
            b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
        )

        # restrict to nodes on the same named branch as the candidate head
        branchnodes = []
        for r in branchrevs:
            c = unfi[r]
            if c.branch() == current_branch:
                branchnodes.append(c.node())

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (
            any(ispublic(r) for r in branchrevs)
            or any(torev(n) in futurecommon for n in branchnodes)
            or any(not hasoutmarker(n) for n in branchnodes)
        ):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads.  However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
598 587
599 588
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    getmarkers = obsstore.successors.get
    pending = [node]
    visited = set(pending)
    # depth-first walk over the obsolescence successors graph, stopping as
    # soon as a pushed changeset is reached
    while pending:
        current = pending.pop()
        if ispushed(current):
            return True
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in getmarkers(current, ()):
            targets = marker[1]  # successors
            if not targets:  # this is a prune marker
                targets = marker[5] or ()  # parents
            for nxt in targets:
                if nxt not in visited:
                    visited.add(nxt)
                    pending.append(nxt)
    return False
General Comments 0
You need to be logged in to leave comments. Login now