##// END OF EJS Templates
outgoing: add a simple fastpath when there is no common...
marmoute -
r52488:3a6fae3b default
parent child Browse files
Show More
@@ -1,646 +1,658 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import functools
10 10
11 11 from .i18n import _
12 12 from .node import (
13 13 hex,
14 14 short,
15 15 )
16 16
17 17 from . import (
18 18 bookmarks,
19 19 branchmap,
20 20 error,
21 21 node as nodemod,
22 22 obsolete,
23 23 phases,
24 24 pycompat,
25 25 scmutil,
26 26 setdiscovery,
27 27 treediscovery,
28 28 util,
29 29 )
30 30
31 31
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Identify the subset of nodes shared between repo and remote.

    Returns a tuple (common, anyincoming, heads):

    "common" lists (at least) the heads of the shared subset.
    "anyincoming" is testable as a boolean telling whether any nodes are
    missing locally. When the remote lacks getbundle support this is
    instead a list of roots of the would-be incoming nodes, suitable for
    changegroupsubset; only pull should still rely on that.
    "heads" is the supplied heads, or else the remote's heads.
    "ancestorsof", when not None, restricts discovery to a subset defined
    by these nodes. Changesets outside that set are not considered (though
    they may still show up in "common").

    When heads is given and every head is already known locally, the reply
    lists just those heads as both "common" and "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes, so
    extensions get a proper hook into outgoing.
    """
    # Old-style servers without getbundle go through tree discovery.
    if not remote.capable(b'getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
        if all(knownnode(h) for h in heads):
            # every requested head is already here: nothing can be incoming
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui,
        repo,
        remote,
        abortwhenunrelated=not force,
        ancestorsof=ancestorsof,
    )
    if heads and not anyinc:
        # server could be lying on the advertised heads
        has_node = repo.changelog.hasnode
        anyinc = any(not has_node(n) for n in heads)
    return (list(common), anyinc, heads or list(srvheads))
75 75
76 76
class outgoing:
    """Represents the result of a findcommonoutgoing() call.

    Members:

    ancestorsof is a list of the nodes whose ancestors are included in the
    outgoing operation.

    missing is a list of those ancestors of ancestorsof that are present in
    local but not in remote.

    common is a set containing revs common between the local and the remote
    repository (at least all of those that are ancestors of ancestorsof).

    commonheads is the list of heads of common.

    excluded is the list of missing changeset that shouldn't be sent
    remotely.

    Some members are computed on demand from the heads, unless provided upfront
    by discovery."""

    def __init__(
        self, repo, commonheads=None, ancestorsof=None, missingroots=None
    ):
        # at most one of them must not be set
        if commonheads is not None and missingroots is not None:
            m = 'commonheads and missingroots arguments are mutually exclusive'
            raise error.ProgrammingError(m)
        cl = repo.changelog
        # node lookups below go through the unfiltered changelog so hidden
        # revisions can still be resolved
        unfi = repo.unfiltered()
        ucl = unfi.changelog
        to_node = ucl.node
        missing = None
        common = None
        # remember whether the caller explicitly restricted the heads
        arg_anc = ancestorsof
        if ancestorsof is None:
            ancestorsof = cl.heads()

        # XXX-perf: do we need all this to be node-list? They would be simpler
        # as rev-num sets (and smartset)
        if missingroots == [nodemod.nullrev] or missingroots == []:
            # fast path: nothing is common with the remote, so everything
            # (below ancestorsof, when the caller restricted it) is missing
            commonheads = [repo.nullid]
            common = set()
            if arg_anc is None:
                missing = [to_node(r) for r in cl]
            else:
                # NOTE(review): '%ln' consumes one list argument per
                # specifier; passing both missingroots and ancestorsof to a
                # single-'%ln' template looks suspicious — confirm against
                # upstream mercurial/discovery.py.
                missing_rev = repo.revs('::%ln', missingroots, ancestorsof)
                missing = [to_node(r) for r in missing_rev]
        elif missingroots is not None:
            # TODO remove call to nodesbetween.
            missing_rev = repo.revs('%ln::%ln', missingroots, ancestorsof)
            ancestorsof = [to_node(r) for r in ucl.headrevs(missing_rev)]
            parent_revs = ucl.parentrevs
            # parents of missing revs that are not themselves missing sit on
            # the "common" side of the frontier
            common_legs = set()
            for r in missing_rev:
                p1, p2 = parent_revs(r)
                if p1 not in missing_rev:
                    common_legs.add(p1)
                if p2 not in missing_rev:
                    common_legs.add(p2)
            common_legs.discard(nodemod.nullrev)
            if not common_legs:
                # no common parent at all: the repositories only share null
                commonheads = [repo.nullid]
                common = set()
            else:
                commonheads_revs = unfi.revs(
                    'heads(%ld::%ld)',
                    common_legs,
                    common_legs,
                )
                commonheads = [to_node(r) for r in commonheads_revs]
                common = ucl.ancestors(commonheads_revs, inclusive=True)
            missing = [to_node(r) for r in missing_rev]
        elif not commonheads:
            commonheads = [repo.nullid]
        self.commonheads = commonheads
        self.ancestorsof = ancestorsof
        self._revlog = cl
        self._common = common      # None means "compute lazily"
        self._missing = missing    # None means "compute lazily"
        self.excluded = []

    def _computecommonmissing(self):
        # lazily derive common/missing from commonheads and ancestorsof
        sets = self._revlog.findcommonmissing(
            self.commonheads, self.ancestorsof
        )
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        # set of revs common to local and remote (computed on first access)
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        # list of nodes present locally but not remotely (computed lazily)
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
165 177
166 178
def findcommonoutgoing(
    repo, other, onlyheads=None, force=False, commoninc=None, portable=False
):
    """Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and ancestorsof,
    to make bundles created from the instance more portable."""
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(
            repo, other, force=force, ancestorsof=onlyheads
        )
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = phases.hassecret(repo) or repo.obsstore
    if not mayexclude:
        # nothing secret or obsolete locally: every requested head may ship
        og.ancestorsof = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.ancestorsof = repo.filtered(b"served").heads()
        og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            # secret or extinct changesets must never leave the repository
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # nothing was filtered out, so the requested heads still stand
            ancestorsof = onlyheads
        else:  # update missing heads
            to_rev = repo.changelog.index.rev
            to_node = repo.changelog.node
            excluded_revs = [to_rev(r) for r in excluded]
            onlyheads_revs = [to_rev(r) for r in onlyheads]
            new_heads = phases.new_heads(repo, onlyheads_revs, excluded_revs)
            ancestorsof = [to_node(r) for r in new_heads]
        og.ancestorsof = ancestorsof
    if portable:
        # recompute common and ancestorsof as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = {cl.rev(n) for n in og._missing}
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]

    return og
234 246
235 247
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Create set of branches involved in the push.
    branches = set()
    for n in outgoing.missing:
        ctx = repo[n]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    with remote.commandexecutor() as e:
        remotemap = e.callcommand(b'branchmap', {}).result()

    knownnode = cl.hasnode  # do not use nodemap until it is filtered
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in remotemap.items():
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        known = []
        unsynced = []
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        # second element is a mutable copy: it will be updated in step C
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache(
        repo,
        (
            (branch, heads[1])
            for branch, heads in headssum.items()
            if heads[0] is not None
        ),
    )
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.items():
        # in-place slice assignment keeps the list identity shared with
        # the tuple stored in headssum
        headssum[branch][1][:] = newheads
    for branch, items in headssum.items():
        for l in items:
            if l is not None:
                l.sort()
        # append the (initially empty) discardedheads slot
        headssum[branch] = items + ([],)

    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = {torev(h) for h in outgoing.ancestorsof}
        futureheads |= {torev(h) for h in outgoing.commonheads}
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(pycompat.iteritems(headssum)):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (
                remoteheads,
                sorted(result[0]),
                unsyncedheads,
                sorted(result[1]),
            )
    return headssum
320 332
321 333
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. Old servers: detect new topological heads only. Both the old and
    # new maps use branch = None (the topological pseudo-branch); the logic
    # mirrors update.
    has_node = repo.changelog.hasnode  # no nodemap until it is filtered
    old_heads = sorted(n for n in remoteheads if has_node(n))
    # Every node in outgoing.missing is a child of an element of old_heads,
    # of another missing node, or of nullrev — which is why the new heads
    # are this simple to compute.
    head_ctxs = repo.set(b'heads(%ln + %ln)', old_heads, outgoing.missing)
    new_heads = sorted(ctx.node() for ctx in head_ctxs)
    # A placeholder unsynced head triggers the "unsynced changes" warning.
    unsynced = [None] if inc else []
    return {None: (old_heads, new_heads, unsynced, [])}
343 355
344 356
def _nowarnheads(pushop):
    """Return the set of head nodes we need not warn about.

    Newly pushed bookmarks exempt their heads from the new-head warning.
    """
    repo = pushop.repo.unfiltered()
    peer = pushop.remote
    local_marks = repo._bookmarks

    with peer.commandexecutor() as executor:
        remote_marks = executor.callcommand(
            b'listkeys',
            {
                b'namespace': b'bookmarks',
            },
        ).result()

    # internal config: bookmarks.pushing
    pushing = [
        local_marks.expandname(name)
        for name in pushop.ui.configlist(b'bookmarks', b'pushing')
    ]

    exempt = set()
    for name in local_marks:
        remote_node = remote_marks.get(name)
        if remote_node and remote_node in repo:
            # bookmark exists on both sides: exempt it when the move is a
            # valid fast-forward
            local_ctx = repo[local_marks[name]]
            remote_ctx = repo[remote_node]
            if bookmarks.validdest(repo, remote_ctx, local_ctx):
                exempt.add(local_ctx.node())
        elif name in pushing and name not in remote_marks:
            # bookmark is being pushed for the first time
            exempt.add(local_marks[name])

    return exempt
378 390
379 391
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise StateError error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [repo.nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable(b'branchmap'):
        headssum = _headssummary(pushop)
    else:
        # degraded summary for old servers without branchmap support
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    # branches whose remoteheads slot is None do not exist remotely yet
    newbranches = [
        branch for branch, heads in headssum.items() if heads[0] is None
    ]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = b', '.join(sorted(newbranches))
        # Calculate how many of the new branches are closed branches
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = closedbranches & set(newbranches)
        if closedbranches:
            errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
                branchnames,
                len(closedbranches),
            )
        else:
            errmsg = _(b"push creates new remote branches: %s") % branchnames
        hint = _(b"use 'hg push --new-branch' to create new remote branches")
        raise error.StateError(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(pycompat.iteritems(headssum)):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally\n")
                )
            elif branch is None:
                repo.ui.status(
                    _(b"remote has heads that are not known locally: %s\n")
                    % heads
                )
            else:
                repo.ui.status(
                    _(
                        b"remote has heads on branch '%s' that are "
                        b"not known locally: %s\n"
                    )
                    % (branch, heads)
                )
        if remoteheads is None:
            # brand-new branch: only complain when it is born with >1 head
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (
                        _(b"push creates new branch '%s' with multiple heads")
                        % branch
                    )
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            # only the first offending branch sets errormsg/hint; later
            # branches still get their heads listed via ui.note below
            if errormsg is None:
                if branch not in (b'default', None):
                    errormsg = _(
                        b"push creates new remote head %s on branch '%s'"
                    ) % (
                        short(dhs[0]),
                        branch,
                    )
                elif repo[dhs[0]].bookmarks():
                    errormsg = _(
                        b"push creates new remote head %s "
                        b"with bookmark '%s'"
                    ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _(b"push creates new remote head %s") % short(
                        dhs[0]
                    )
                if unsyncedheads:
                    hint = _(
                        b"pull and merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
                else:
                    hint = _(
                        b"merge or"
                        b" see 'hg help push' for details about"
                        b" pushing new heads"
                    )
            if branch is None:
                repo.ui.note(_(b"new remote heads:\n"))
            else:
                repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note(b" %s\n" % short(h))
    if errormsg:
        raise error.StateError(errormsg, hint=hint)
525 537
526 538
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    torev = unfi.changelog.index.get_rev
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = lambda r: getphase(unfi, r) == public
    ispushed = lambda n: torev(n) in futurecommon
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set()  # final set of new heads
    discarded = set()  # new head of fully replaced branch

    localcandidate = set()  # candidate heads known locally
    unknownheads = set()  # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                # surprising: we hold obsolescence data for a node we do
                # not know — worth a debug trace
                msg = (
                    b'checkheads: remote head unknown locally has'
                    b' local marker: %s\n'
                )
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    obsrevs = obsolete.getrevs(unfi, b'obsolete')
    futurenonobsolete = frozenset(futurecommon) - obsrevs

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        r = torev(nh)
        current_branch = unfi[nh].branch()
        # run this check early to skip the evaluation of the whole branch
        if ispublic(r) or r not in obsrevs:
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs(
            b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
        )

        branchnodes = []
        for r in branchrevs:
            c = unfi[r]
            if c.branch() == current_branch:
                branchnodes.append(c.node())

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (
            any(ispublic(r) for r in branchrevs)
            or any(torev(n) in futurenonobsolete for n in branchnodes)
            or any(not hasoutmarker(n) for n in branchnodes)
        ):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads. However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
616 628
617 629
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    successors_of = obsstore.successors
    # depth-first walk over successor (or, for prunes, parent) chains
    pending = [node]
    visited = set(pending)
    while pending:
        candidate = pending.pop()
        if ispushed(candidate):
            return True
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in successors_of.get(candidate, ()):
            targets = marker[1]  # successors
            if not targets:  # this is a prune marker: follow parents instead
                targets = marker[5] or ()
            for nxt in targets:
                if nxt not in visited:
                    visited.add(nxt)
                    pending.append(nxt)
    return False
General Comments 0
You need to be logged in to leave comments. Login now