discovery: prevent recomputing info about server and outgoing changesets...
Pulkit Goyal
r42193:98908e36 default
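In outline, the old `_headssummary` walked `outgoing.missing` once to build the branch set and then again later to rebuild the changectx list and the set of touched branches; the patch collects everything in a single pass and snapshots the branch set (`nbranches`) before `branches` is mutated by `difference_update()`. A minimal self-contained sketch of the pattern, where `FakeCtx` and the sample data are stand-ins for Mercurial's changectx objects, not code from the patch:

# Sketch of the one-pass refactor. FakeCtx stands in for a changectx.
class FakeCtx(object):
    def __init__(self, branch):
        self._branch = branch
    def branch(self):
        return self._branch

repo = {1: FakeCtx('default'), 2: FakeCtx('stable'), 3: FakeCtx('default')}
missing = [1, 2, 3]

# before: separate traversals of `missing` for branches and contexts
branches_old = set(repo[n].branch() for n in missing)
missingctx_old = list(repo[n] for n in missing)

# after: one traversal collecting both, plus a snapshot of the branches
missingctx = set()
branches = set()
for n in missing:
    ctx = repo[n]
    missingctx.add(ctx)
    branches.add(ctx.branch())
nbranches = branches.copy()  # kept intact while `branches` is mutated later

assert branches == branches_old == {'default', 'stable'}
assert missingctx == set(missingctx_old)

The snapshot matters because `branches.difference_update(newbranches)` later strips new branches out of `branches`, while the "B" and "C" sections of the function still need the full set of touched branches.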
@@ -1,533 +1,534 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import functools
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 
 from . import (
     bookmarks,
     branchmap,
     error,
     phases,
     scmutil,
     setdiscovery,
     treediscovery,
     util,
 )
 
 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.
 
     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
       locally. If remote does not support getbundle, this actually is a list of
       roots of the nodes that would be incoming, to be supplied to
       changegroupsubset. No code except for pull should be relying on this fact
       any longer.
     "heads" is either the supplied heads, or else the remote's heads.
41 41 "ancestorsof" if not None, restrict the discovery to a subset defined by
42 42 these nodes. Changeset outside of this set won't be considered (and
43 43 won't appears in "common")
 
     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".
 
     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """
 
     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)
 
     if heads:
         knownnode = repo.changelog.hasnode # no nodemap until it is filtered
         if all(knownnode(h) for h in heads):
             return (heads, False, heads)
 
     res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                        abortwhenunrelated=not force,
                                        ancestorsof=ancestorsof)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))
 
 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.
 
     Members:
 
     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changesets that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.
 
     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''
 
     def __init__(self, repo, commonheads=None, missingheads=None,
                  missingroots=None):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
         if missingheads is None:
             missingheads = cl.heads()
         if missingroots:
             discbases = []
             for n in missingroots:
                 discbases.extend([p for p in cl.parents(n) if p != nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
             csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
             included = set(csets)
             missingheads = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
             commonheads = [nullid]
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = cl
         self._common = None
         self._missing = None
         self.excluded = []
 
     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets
 
     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common
 
     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing
 
 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.
 
     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.
 
     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.
 
     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)
 
     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force,
                                        ancestorsof=onlyheads)
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
     mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = repo.filtered("served").heads()
         og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else: # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]
 
     return og
 
 def _headssummary(pushop):
     """compute a summary of branch and heads status before and after push
 
     return {'branch': ([remoteheads], [newheads],
                        [unsyncedheads], [discardedheads])} mapping
 
     - branch: the branch name,
     - remoteheads: the list of remote heads known locally
                    None if the branch is new,
     - newheads: the new remote heads (known locally) with outgoing pushed,
     - unsyncedheads: the list of remote heads unknown locally,
     - discardedheads: the list of heads made obsolete by the push.
     """
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     cl = repo.changelog
     headssum = {}
+    missingctx = set()
     # A. Create set of branches involved in the push.
-    branches = set(repo[n].branch() for n in outgoing.missing)
+    branches = set()
+    for n in outgoing.missing:
+        ctx = repo[n]
+        missingctx.add(ctx)
+        branches.add(ctx.branch())
+    nbranches = branches.copy()
 
     with remote.commandexecutor() as e:
         remotemap = e.callcommand('branchmap', {}).result()
 
-    newbranches = branches - set(remotemap)
+    remotebranches = set(remotemap)
+    newbranches = branches - remotebranches
     branches.difference_update(newbranches)
 
     # A. register remote heads
-    remotebranches = set()
     for branch, heads in remotemap.iteritems():
-        remotebranches.add(branch)
         known = []
         unsynced = []
         knownnode = cl.hasnode # do not use nodemap until it is filtered
         for h in heads:
             if knownnode(h):
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)
     # B. add new branch data
-    missingctx = list(repo[n] for n in outgoing.missing)
-    touchedbranches = set()
-    for ctx in missingctx:
-        branch = ctx.branch()
-        touchedbranches.add(branch)
+    for branch in nbranches:
         if branch not in headssum:
             headssum[branch] = (None, [], [])
 
     # C drop data about untouched branches:
-    for branch in remotebranches - touchedbranches:
+    for branch in remotebranches - nbranches:
         del headssum[branch]
 
     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.remotebranchcache((branch, heads[1])
                                          for branch, heads in headssum.iteritems()
                                          if heads[0] is not None)
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     for branch, items in headssum.iteritems():
         for l in items:
             if l is not None:
                 l.sort()
         headssum[branch] = items + ([],)
 
     # If there is no obsstore, no post processing is needed.
     if repo.obsstore:
         torev = repo.changelog.rev
         futureheads = set(torev(h) for h in outgoing.missingheads)
         futureheads |= set(torev(h) for h in outgoing.commonheads)
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(headssum.iteritems()):
             remoteheads, newheads, unsyncedheads, placeholder = heads
             result = _postprocessobsolete(pushop, allfuturecommon, newheads)
             headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                 sorted(result[1]))
     return headssum
 
 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""
 
     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
     knownnode = repo.changelog.hasnode # no nodemap until it is filtered
     oldheads = sorted(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new heads are very simple to compute.
     r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = sorted(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
         unsynced = [None]
     else:
         unsynced = []
     return {None: (oldheads, newheads, unsynced, [])}
 
 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     localbookmarks = repo._bookmarks
 
     with remote.commandexecutor() as e:
         remotebookmarks = e.callcommand('listkeys', {
             'namespace': 'bookmarks',
         }).result()
 
     bookmarkedheads = set()
 
     # internal config: bookmarks.pushing
     newbookmarks = [localbookmarks.expandname(b)
                     for b in pushop.ui.configlist('bookmarks', 'pushing')]
 
     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
             if bm in newbookmarks and bm not in remotebookmarks:
                 bookmarkedheads.add(localbookmarks[bm])
 
     return bookmarkedheads
 
 def checkheads(pushop):
     """Check that a push won't add any outgoing head
 
     Raise an Abort error and display a ui message as needed.
321 322 """
322 323
323 324 repo = pushop.repo.unfiltered()
324 325 remote = pushop.remote
325 326 outgoing = pushop.outgoing
326 327 remoteheads = pushop.remoteheads
327 328 newbranch = pushop.newbranch
328 329 inc = bool(pushop.incoming)
329 330
330 331 # Check for each named branch if we're creating new remote heads.
331 332 # To be a remote head after push, node must be either:
332 333 # - unknown locally
333 334 # - a local outgoing head descended from update
334 335 # - a remote head that's known locally and not
335 336 # ancestral to an outgoing head
336 337 if remoteheads == [nullid]:
337 338 # remote is empty, nothing to check.
338 339 return
339 340
340 341 if remote.capable('branchmap'):
341 342 headssum = _headssummary(pushop)
342 343 else:
343 344 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
344 345 pushop.pushbranchmap = headssum
345 346 newbranches = [branch for branch, heads in headssum.iteritems()
346 347 if heads[0] is None]
347 348 # 1. Check for new branches on the remote.
348 349 if newbranches and not newbranch: # new branch requires --new-branch
349 350 branchnames = ', '.join(sorted(newbranches))
350 351 raise error.Abort(_("push creates new remote branches: %s!")
351 352 % branchnames,
352 353 hint=_("use 'hg push --new-branch' to create"
353 354 " new remote branches"))
354 355
355 356 # 2. Find heads that we need not warn about
356 357 nowarnheads = _nowarnheads(pushop)
357 358
358 359 # 3. Check for new heads.
359 360 # If there are more heads after the push than before, a suitable
360 361 # error message, depending on unsynced status, is displayed.
361 362 errormsg = None
362 363 for branch, heads in sorted(headssum.iteritems()):
363 364 remoteheads, newheads, unsyncedheads, discardedheads = heads
364 365 # add unsynced data
365 366 if remoteheads is None:
366 367 oldhs = set()
367 368 else:
368 369 oldhs = set(remoteheads)
369 370 oldhs.update(unsyncedheads)
370 371 dhs = None # delta heads, the new heads on branch
371 372 newhs = set(newheads)
372 373 newhs.update(unsyncedheads)
373 374 if unsyncedheads:
374 375 if None in unsyncedheads:
375 376 # old remote, no heads data
376 377 heads = None
377 378 else:
378 379 heads = scmutil.nodesummaries(repo, unsyncedheads)
379 380 if heads is None:
380 381 repo.ui.status(_("remote has heads that are "
381 382 "not known locally\n"))
382 383 elif branch is None:
383 384 repo.ui.status(_("remote has heads that are "
384 385 "not known locally: %s\n") % heads)
385 386 else:
386 387 repo.ui.status(_("remote has heads on branch '%s' that are "
387 388 "not known locally: %s\n") % (branch, heads))
388 389 if remoteheads is None:
389 390 if len(newhs) > 1:
390 391 dhs = list(newhs)
391 392 if errormsg is None:
392 393 errormsg = (_("push creates new branch '%s' "
393 394 "with multiple heads") % (branch))
394 395 hint = _("merge or"
395 396 " see 'hg help push' for details about"
396 397 " pushing new heads")
397 398 elif len(newhs) > len(oldhs):
398 399 # remove bookmarked or existing remote heads from the new heads list
399 400 dhs = sorted(newhs - nowarnheads - oldhs)
400 401 if dhs:
401 402 if errormsg is None:
402 403 if branch not in ('default', None):
403 404 errormsg = _("push creates new remote head %s "
404 405 "on branch '%s'!") % (short(dhs[0]), branch)
405 406 elif repo[dhs[0]].bookmarks():
406 407 errormsg = _("push creates new remote head %s "
407 408 "with bookmark '%s'!") % (
408 409 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
409 410 else:
410 411 errormsg = _("push creates new remote head %s!"
411 412 ) % short(dhs[0])
412 413 if unsyncedheads:
413 414 hint = _("pull and merge or"
414 415 " see 'hg help push' for details about"
415 416 " pushing new heads")
416 417 else:
417 418 hint = _("merge or"
418 419 " see 'hg help push' for details about"
419 420 " pushing new heads")
420 421 if branch is None:
421 422 repo.ui.note(_("new remote heads:\n"))
422 423 else:
423 424 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
424 425 for h in dhs:
425 426 repo.ui.note((" %s\n") % short(h))
426 427 if errormsg:
427 428 raise error.Abort(errormsg, hint=hint)
428 429
429 430 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
430 431 """post process the list of new heads with obsolescence information
431 432
432 433 Exists as a sub-function to contain the complexity and allow extensions to
433 434 experiment with smarter logic.
434 435
435 436 Returns (newheads, discarded_heads) tuple
436 437 """
     # known issues
     #
     # * We "silently" skip processing on all changesets unknown locally
     #
     # * if <nh> is public on the remote, it won't be affected by obsolete
     #   markers and a new one is created
 
     # define various utilities and containers
     repo = pushop.repo
     unfi = repo.unfiltered()
     tonode = unfi.changelog.node
     torev = unfi.changelog.nodemap.get
     public = phases.public
     getphase = unfi._phasecache.phase
     ispublic = (lambda r: getphase(unfi, r) == public)
     ispushed = (lambda n: torev(n) in futurecommon)
     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
     successorsmarkers = unfi.obsstore.successors
     newhs = set() # final set of new heads
     discarded = set() # new head of fully replaced branch
 
     localcandidate = set() # candidate heads known locally
     unknownheads = set() # candidate heads unknown locally
     for h in candidate_newhs:
         if h in unfi:
             localcandidate.add(h)
         else:
             if successorsmarkers.get(h) is not None:
                 msg = ('checkheads: remote head unknown locally has'
                        ' local marker: %s\n')
                 repo.ui.debug(msg % hex(h))
             unknownheads.add(h)
 
     # fast path the simple case
     if len(localcandidate) == 1:
         return unknownheads | set(candidate_newhs), set()
 
     # actually process branch replacement
     while localcandidate:
         nh = localcandidate.pop()
         # run this check early to skip the evaluation of the whole branch
         if (torev(nh) in futurecommon or ispublic(torev(nh))):
             newhs.add(nh)
             continue
 
         # Get all revs/nodes on the branch exclusive to this head
         # (already filtered heads are "ignored")
         branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                                nh, localcandidate, newhs)
         branchnodes = [tonode(r) for r in branchrevs]
 
         # The branch won't be hidden on the remote if
         # * any part of it is public,
         # * any part of it is considered part of the result by previous logic,
         # * if we have no markers to push to obsolete it.
         if (any(ispublic(r) for r in branchrevs)
             or any(torev(n) in futurecommon for n in branchnodes)
             or any(not hasoutmarker(n) for n in branchnodes)):
             newhs.add(nh)
         else:
             # note: there is a corner case if there is a merge in the branch.
             # we might end up with -more- heads. However, these heads are not
             # "added" by the push, but more by the "removal" on the remote, so
             # I think it is okay to ignore them.
             discarded.add(nh)
     newhs |= unknownheads
     return newhs, discarded
 
 def pushingmarkerfor(obsstore, ispushed, node):
     """true if some markers are to be pushed for node
 
     We cannot just look into the pushed obsmarkers from the pushop because
     discovery might have filtered relevant markers. In addition, listing all
     markers relevant to all changesets in the pushed set would be too
     expensive (O(len(repo)))
 
     (note: There are caching opportunities in this function, but they would
     require a two dimensional stack.)
     """
     successorsmarkers = obsstore.successors
     stack = [node]
     seen = set(stack)
     while stack:
         current = stack.pop()
         if ispushed(current):
             return True
         markers = successorsmarkers.get(current, ())
         # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
         for m in markers:
             nexts = m[1] # successors
             if not nexts: # this is a prune marker
                 nexts = m[5] or () # parents
             for n in nexts:
                 if n not in seen:
                     seen.add(n)
                     stack.append(n)
     return False
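
The `pushingmarkerfor` walk above is self-contained enough to exercise with plain dicts. A toy driver follows; the marker tuples and the `successors` mapping are fabricated for illustration, with the field layout taken from the comment in the source:

# Toy driver for the pushingmarkerfor() walk, using plain dicts in
# place of Mercurial's obsstore. Marker fields, per the source comment:
# ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
def toy_pushingmarkerfor(successors, ispushed, node):
    stack = [node]
    seen = set(stack)
    while stack:
        current = stack.pop()
        if ispushed(current):
            return True
        for m in successors.get(current, ()):
            nexts = m[1]           # successors
            if not nexts:          # prune marker: follow parents instead
                nexts = m[5] or ()
            for n in nexts:
                if n not in seen:
                    seen.add(n)
                    stack.append(n)
    return False

# 'a' was rewritten into 'b'; 'b' was pruned, recording parent 'c'.
successors = {
    'a': [('a', ('b',), 0, (), (0, 0), None)],
    'b': [('b', (), 0, (), (0, 0), ('c',))],
}
assert toy_pushingmarkerfor(successors, lambda n: n == 'c', 'a')
assert not toy_pushingmarkerfor(successors, lambda n: n == 'z', 'a')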
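Related to the commit's theme of not recomputing discovery data: the docstrings of `findcommonincoming` and `findcommonoutgoing` describe how a caller reuses the incoming result instead of rediscovering it. A sketch of that call pattern, as a hypothetical caller rather than code from this file:

from mercurial import discovery

def outgoingsummary(repo, remote):
    # Run discovery once; findcommonincoming returns
    # (common, anyincoming, heads).
    commoninc = discovery.findcommonincoming(repo, remote)
    # Hand the tuple to findcommonoutgoing so it is not recomputed.
    og = discovery.findcommonoutgoing(repo, remote, commoninc=commoninc)
    return og.missing, og.excluded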