discovery: don't reimplement all()...
Martin von Zweigbergk
r35897:6c1d3779 default
@@ -1,530 +1,525 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 scmutil,
25 25 setdiscovery,
26 26 treediscovery,
27 27 util,
28 28 )
29 29
30 30 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
31 31 """Return a tuple (common, anyincoming, heads) used to identify the common
32 32 subset of nodes between repo and remote.
33 33
34 34 "common" is a list of (at least) the heads of the common subset.
35 35 "anyincoming" is testable as a boolean indicating if any nodes are missing
36 36 locally. If remote does not support getbundle, this actually is a list of
37 37 roots of the nodes that would be incoming, to be supplied to
38 38 changegroupsubset. No code except for pull should be relying on this fact
39 39 any longer.
40 40 "heads" is either the supplied heads, or else the remote's heads.
41 41 "ancestorsof" if not None, restrict the discovery to a subset defined by
42 42 these nodes. Changeset outside of this set won't be considered (and
43 43 won't appears in "common")
44 44
45 45 If you pass heads and they are all known locally, the response lists just
46 46 these heads in "common" and in "heads".
47 47
48 48 Please use findcommonoutgoing to compute the set of outgoing nodes to give
49 49 extensions a good hook into outgoing.
50 50 """
51 51
52 52 if not remote.capable('getbundle'):
53 53 return treediscovery.findcommonincoming(repo, remote, heads, force)
54 54
55 55 if heads:
56 allknown = True
57 56 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 for h in heads:
59 if not knownnode(h):
60 allknown = False
61 break
62 if allknown:
57 if all(knownnode(h) for h in heads):
63 58 return (heads, False, heads)
64 59
65 60 res = setdiscovery.findcommonheads(repo.ui, repo, remote, heads,
66 61 abortwhenunrelated=not force,
67 62 ancestorsof=ancestorsof)
68 63 common, anyinc, srvheads = res
69 64 return (list(common), anyinc, heads or list(srvheads))
70 65
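The hunk above is the whole point of this revision: the hand-rolled allknown flag-and-break loop is replaced by the built-in all(), which short-circuits on the first unknown head exactly as the removed break did. A minimal standalone sketch of the equivalence (a plain set stands in for repo.changelog.hasnode):

    # Both forms stop scanning at the first head that fails the test.
    known = {'a', 'b', 'c'}          # stand-in for repo.changelog.hasnode
    heads = ['a', 'b', 'z']

    allknown = True                  # removed style: manual flag + break
    for h in heads:
        if h not in known:
            allknown = False
            break

    assert allknown == all(h in known for h in heads)   # new style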
71 66 class outgoing(object):
72 67 '''Represents the set of nodes present in a local repo but not in a
73 68 (possibly) remote one.
74 69
75 70 Members:
76 71
77 72 missing is a list of all nodes present in local but not in remote.
78 73 common is a list of all nodes shared between the two repos.
79 74 excluded is the list of missing changesets that shouldn't be sent remotely.
80 75 missingheads is the list of heads of missing.
81 76 commonheads is the list of heads of common.
82 77
83 78 The sets are computed on demand from the heads, unless provided upfront
84 79 by discovery.'''
85 80
86 81 def __init__(self, repo, commonheads=None, missingheads=None,
87 82 missingroots=None):
88 83 # at least one of them must not be set
89 84 assert None in (commonheads, missingroots)
90 85 cl = repo.changelog
91 86 if missingheads is None:
92 87 missingheads = cl.heads()
93 88 if missingroots:
94 89 discbases = []
95 90 for n in missingroots:
96 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
97 92 # TODO remove call to nodesbetween.
98 93 # TODO populate attributes on outgoing instance instead of setting
99 94 # discbases.
100 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
101 96 included = set(csets)
102 97 missingheads = heads
103 98 commonheads = [n for n in discbases if n not in included]
104 99 elif not commonheads:
105 100 commonheads = [nullid]
106 101 self.commonheads = commonheads
107 102 self.missingheads = missingheads
108 103 self._revlog = cl
109 104 self._common = None
110 105 self._missing = None
111 106 self.excluded = []
112 107
113 108 def _computecommonmissing(self):
114 109 sets = self._revlog.findcommonmissing(self.commonheads,
115 110 self.missingheads)
116 111 self._common, self._missing = sets
117 112
118 113 @util.propertycache
119 114 def common(self):
120 115 if self._common is None:
121 116 self._computecommonmissing()
122 117 return self._common
123 118
124 119 @util.propertycache
125 120 def missing(self):
126 121 if self._missing is None:
127 122 self._computecommonmissing()
128 123 return self._missing
129 124
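The two properties above are lazy: nothing is computed until common or missing is first read, and util.propertycache then memoizes the result on the instance. A rough standalone sketch of the same pattern, using functools.cached_property (Python 3.8+) in place of Mercurial's util.propertycache; the Outgoing stand-in and its _computecommonmissing are illustrative, not the real API:

    import functools

    class Outgoing(object):
        def __init__(self, commonheads, missingheads):
            self.commonheads = commonheads
            self.missingheads = missingheads

        def _computecommonmissing(self):
            # stand-in for changelog.findcommonmissing(...)
            return set(self.commonheads), set(self.missingheads)

        @functools.cached_property
        def common(self):
            return self._computecommonmissing()[0]

        @functools.cached_property
        def missing(self):
            return self._computecommonmissing()[1]

    og = Outgoing(['c1'], ['m1', 'm2'])
    assert og.missing == {'m1', 'm2'}   # computed and cached on first access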
130 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
131 126 commoninc=None, portable=False):
132 127 '''Return an outgoing instance to identify the nodes present in repo but
133 128 not in other.
134 129
135 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
136 131 (inclusive) are included. If you already know the local repo's heads,
137 132 passing them in onlyheads is faster than letting them be recomputed here.
138 133
139 134 If commoninc is given, it must be the result of a prior call to
140 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
141 136
142 137 If portable is given, compute more conservative common and missingheads,
143 138 to make bundles created from the instance more portable.'''
144 139 # declare an empty outgoing object to be filled later
145 140 og = outgoing(repo, None, None)
146 141
147 142 # get common set if not provided
148 143 if commoninc is None:
149 144 commoninc = findcommonincoming(repo, other, force=force,
150 145 ancestorsof=onlyheads)
151 146 og.commonheads, _any, _hds = commoninc
152 147
153 148 # compute outgoing
154 149 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
155 150 if not mayexclude:
156 151 og.missingheads = onlyheads or repo.heads()
157 152 elif onlyheads is None:
158 153 # use visible heads as it should be cached
159 154 og.missingheads = repo.filtered("served").heads()
160 155 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
161 156 else:
162 157 # compute common, missing and exclude secret stuff
163 158 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
164 159 og._common, allmissing = sets
165 160 og._missing = missing = []
166 161 og.excluded = excluded = []
167 162 for node in allmissing:
168 163 ctx = repo[node]
169 164 if ctx.phase() >= phases.secret or ctx.extinct():
170 165 excluded.append(node)
171 166 else:
172 167 missing.append(node)
173 168 if len(missing) == len(allmissing):
174 169 missingheads = onlyheads
175 170 else: # update missing heads
176 171 missingheads = phases.newheads(repo, onlyheads, excluded)
177 172 og.missingheads = missingheads
178 173 if portable:
179 174 # recompute common and missingheads as if -r<rev> had been given for
180 175 # each head of missing, and --base <rev> for each head of the proper
181 176 # ancestors of missing
182 177 og._computecommonmissing()
183 178 cl = repo.changelog
184 179 missingrevs = set(cl.rev(n) for n in og._missing)
185 180 og._common = set(cl.ancestors(missingrevs)) - missingrevs
186 181 commonheads = set(og.commonheads)
187 182 og.missingheads = [h for h in og.missingheads if h not in commonheads]
188 183
189 184 return og
190 185
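When secret or extinct changesets may be present, the loop above partitions allmissing into nodes that will be pushed and nodes that must be withheld. The same partitioning step in isolation (the is_excluded predicate is a hypothetical stand-in for the phase/extinct test on a changectx):

    def partition(allmissing, is_excluded):
        """Split nodes into (missing, excluded), preserving order."""
        missing, excluded = [], []
        for node in allmissing:
            (excluded if is_excluded(node) else missing).append(node)
        return missing, excluded

    missing, excluded = partition([1, 2, 3, 4], lambda n: n % 2 == 0)
    assert missing == [1, 3] and excluded == [2, 4]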
191 186 def _headssummary(pushop):
192 187 """compute a summary of branch and heads status before and after push
193 188
194 189 return {'branch': ([remoteheads], [newheads],
195 190 [unsyncedheads], [discardedheads])} mapping
196 191
197 192 - branch: the branch name,
198 193 - remoteheads: the list of remote heads known locally
199 194 None if the branch is new,
200 195 - newheads: the new remote heads (known locally) with outgoing pushed,
201 196 - unsyncedheads: the list of remote heads unknown locally,
202 197 - discardedheads: the list of heads made obsolete by the push.
203 198 """
204 199 repo = pushop.repo.unfiltered()
205 200 remote = pushop.remote
206 201 outgoing = pushop.outgoing
207 202 cl = repo.changelog
208 203 headssum = {}
209 204 # A. Create set of branches involved in the push.
210 205 branches = set(repo[n].branch() for n in outgoing.missing)
211 206 remotemap = remote.branchmap()
212 207 newbranches = branches - set(remotemap)
213 208 branches.difference_update(newbranches)
214 209
215 210 # B. register remote heads
216 211 remotebranches = set()
217 212 for branch, heads in remote.branchmap().iteritems():
218 213 remotebranches.add(branch)
219 214 known = []
220 215 unsynced = []
221 216 knownnode = cl.hasnode # do not use nodemap until it is filtered
222 217 for h in heads:
223 218 if knownnode(h):
224 219 known.append(h)
225 220 else:
226 221 unsynced.append(h)
227 222 headssum[branch] = (known, list(known), unsynced)
229 224 # C. add new branch data
229 224 missingctx = list(repo[n] for n in outgoing.missing)
230 225 touchedbranches = set()
231 226 for ctx in missingctx:
232 227 branch = ctx.branch()
233 228 touchedbranches.add(branch)
234 229 if branch not in headssum:
235 230 headssum[branch] = (None, [], [])
236 231
238 233 # D. drop data about untouched branches:
238 233 for branch in remotebranches - touchedbranches:
239 234 del headssum[branch]
240 235
242 237 # E. Update newmap with outgoing changes.
242 237 # This will possibly add new heads and remove existing ones.
243 238 newmap = branchmap.branchcache((branch, heads[1])
244 239 for branch, heads in headssum.iteritems()
245 240 if heads[0] is not None)
246 241 newmap.update(repo, (ctx.rev() for ctx in missingctx))
247 242 for branch, newheads in newmap.iteritems():
248 243 headssum[branch][1][:] = newheads
249 244 for branch, items in headssum.iteritems():
250 245 for l in items:
251 246 if l is not None:
252 247 l.sort()
253 248 headssum[branch] = items + ([],)
254 249
255 250 # If there is no obsstore, no post processing is needed.
256 251 if repo.obsstore:
257 252 torev = repo.changelog.rev
258 253 futureheads = set(torev(h) for h in outgoing.missingheads)
259 254 futureheads |= set(torev(h) for h in outgoing.commonheads)
260 255 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
261 256 for branch, heads in sorted(headssum.iteritems()):
262 257 remoteheads, newheads, unsyncedheads, placeholder = heads
263 258 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
264 259 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
265 260 sorted(result[1]))
266 261 return headssum
267 262
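For reference, a hypothetical instance of the mapping this function returns, together with the new-branch test that checkheads later applies to it (modern dict methods used for brevity):

    # {branch: (remoteheads, newheads, unsyncedheads, discardedheads)}
    headssum = {
        'default': (['old'], ['old', 'new'], [], []),
        'feature': (None, ['fhead'], [], []),  # None: branch is new remotely
    }
    newbranches = [b for b, h in headssum.items() if h[0] is None]
    assert newbranches == ['feature']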
268 263 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
269 264 """Compute branchmapsummary for repo without branchmap support"""
270 265
271 266 # 1-4b. old servers: Check for new topological heads.
272 267 # Construct {old,new}map with branch = None (topological branch).
273 268 # (code based on update)
274 269 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
275 270 oldheads = sorted(h for h in remoteheads if knownnode(h))
276 271 # all nodes in outgoing.missing are children of either:
277 272 # - an element of oldheads
278 273 # - another element of outgoing.missing
279 274 # - nullrev
280 275 # This explains why the new heads are very simple to compute.
281 276 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
282 277 newheads = sorted(c.node() for c in r)
283 278 # set some unsynced head to issue the "unsynced changes" warning
284 279 if inc:
285 280 unsynced = [None]
286 281 else:
287 282 unsynced = []
288 283 return {None: (oldheads, newheads, unsynced, [])}
289 284
290 285 def _nowarnheads(pushop):
291 286 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
292 287 repo = pushop.repo.unfiltered()
293 288 remote = pushop.remote
294 289 localbookmarks = repo._bookmarks
295 290 remotebookmarks = remote.listkeys('bookmarks')
296 291 bookmarkedheads = set()
297 292
298 293 # internal config: bookmarks.pushing
299 294 newbookmarks = [localbookmarks.expandname(b)
300 295 for b in pushop.ui.configlist('bookmarks', 'pushing')]
301 296
302 297 for bm in localbookmarks:
303 298 rnode = remotebookmarks.get(bm)
304 299 if rnode and rnode in repo:
305 300 lctx, rctx = repo[bm], repo[rnode]
306 301 if bookmarks.validdest(repo, rctx, lctx):
307 302 bookmarkedheads.add(lctx.node())
308 303 else:
309 304 if bm in newbookmarks and bm not in remotebookmarks:
310 305 bookmarkedheads.add(repo[bm].node())
311 306
312 307 return bookmarkedheads
313 308
314 309 def checkheads(pushop):
315 310 """Check that a push won't add any outgoing head
316 311
317 312 raise an Abort error and display a ui message as needed.
318 313 """
319 314
320 315 repo = pushop.repo.unfiltered()
321 316 remote = pushop.remote
322 317 outgoing = pushop.outgoing
323 318 remoteheads = pushop.remoteheads
324 319 newbranch = pushop.newbranch
325 320 inc = bool(pushop.incoming)
326 321
327 322 # Check for each named branch if we're creating new remote heads.
328 323 # To be a remote head after push, node must be either:
329 324 # - unknown locally
330 325 # - a local outgoing head descended from update
331 326 # - a remote head that's known locally and not
332 327 # ancestral to an outgoing head
333 328 if remoteheads == [nullid]:
334 329 # remote is empty, nothing to check.
335 330 return
336 331
337 332 if remote.capable('branchmap'):
338 333 headssum = _headssummary(pushop)
339 334 else:
340 335 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
341 336 pushop.pushbranchmap = headssum
342 337 newbranches = [branch for branch, heads in headssum.iteritems()
343 338 if heads[0] is None]
344 339 # 1. Check for new branches on the remote.
345 340 if newbranches and not newbranch: # new branch requires --new-branch
346 341 branchnames = ', '.join(sorted(newbranches))
347 342 raise error.Abort(_("push creates new remote branches: %s!")
348 343 % branchnames,
349 344 hint=_("use 'hg push --new-branch' to create"
350 345 " new remote branches"))
351 346
352 347 # 2. Find heads that we need not warn about
353 348 nowarnheads = _nowarnheads(pushop)
354 349
355 350 # 3. Check for new heads.
356 351 # If there are more heads after the push than before, a suitable
357 352 # error message, depending on unsynced status, is displayed.
358 353 errormsg = None
359 354 for branch, heads in sorted(headssum.iteritems()):
360 355 remoteheads, newheads, unsyncedheads, discardedheads = heads
361 356 # add unsynced data
362 357 if remoteheads is None:
363 358 oldhs = set()
364 359 else:
365 360 oldhs = set(remoteheads)
366 361 oldhs.update(unsyncedheads)
367 362 dhs = None # delta heads, the new heads on branch
368 363 newhs = set(newheads)
369 364 newhs.update(unsyncedheads)
370 365 if unsyncedheads:
371 366 if None in unsyncedheads:
372 367 # old remote, no heads data
373 368 heads = None
374 369 else:
375 370 heads = scmutil.nodesummaries(repo, unsyncedheads)
376 371 if heads is None:
377 372 repo.ui.status(_("remote has heads that are "
378 373 "not known locally\n"))
379 374 elif branch is None:
380 375 repo.ui.status(_("remote has heads that are "
381 376 "not known locally: %s\n") % heads)
382 377 else:
383 378 repo.ui.status(_("remote has heads on branch '%s' that are "
384 379 "not known locally: %s\n") % (branch, heads))
385 380 if remoteheads is None:
386 381 if len(newhs) > 1:
387 382 dhs = list(newhs)
388 383 if errormsg is None:
389 384 errormsg = (_("push creates new branch '%s' "
390 385 "with multiple heads") % (branch))
391 386 hint = _("merge or"
392 387 " see 'hg help push' for details about"
393 388 " pushing new heads")
394 389 elif len(newhs) > len(oldhs):
395 390 # remove bookmarked or existing remote heads from the new heads list
396 391 dhs = sorted(newhs - nowarnheads - oldhs)
397 392 if dhs:
398 393 if errormsg is None:
399 394 if branch not in ('default', None):
400 395 errormsg = _("push creates new remote head %s "
401 396 "on branch '%s'!") % (short(dhs[0]), branch)
402 397 elif repo[dhs[0]].bookmarks():
403 398 errormsg = _("push creates new remote head %s "
404 399 "with bookmark '%s'!") % (
405 400 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
406 401 else:
407 402 errormsg = _("push creates new remote head %s!"
408 403 ) % short(dhs[0])
409 404 if unsyncedheads:
410 405 hint = _("pull and merge or"
411 406 " see 'hg help push' for details about"
412 407 " pushing new heads")
413 408 else:
414 409 hint = _("merge or"
415 410 " see 'hg help push' for details about"
416 411 " pushing new heads")
417 412 if branch is None:
418 413 repo.ui.note(_("new remote heads:\n"))
419 414 else:
420 415 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
421 416 for h in dhs:
422 417 repo.ui.note((" %s\n") % short(h))
423 418 if errormsg:
424 419 raise error.Abort(errormsg, hint=hint)
425 420
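The heart of the check is set arithmetic on heads: a branch gains a head when the post-push head set, minus heads the remote already had and heads excused by bookmarks, is non-empty. A simplified distillation of the dhs computation above (names are illustrative):

    def addedheads(oldhs, newhs, nowarnheads):
        """Heads a push would add to a branch."""
        return sorted(set(newhs) - set(nowarnheads) - set(oldhs))

    assert addedheads({'h1'}, {'h1', 'h2'}, set()) == ['h2']  # push refused
    assert addedheads({'h1'}, {'h1', 'h2'}, {'h2'}) == []     # bookmarked, ok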
426 421 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
427 422 """post process the list of new heads with obsolescence information
428 423
429 424 Exists as a sub-function to contain the complexity and allow extensions to
430 425 experiment with smarter logic.
431 426
432 427 Returns (newheads, discarded_heads) tuple
433 428 """
434 429 # known issue
435 430 #
436 431 # * We "silently" skip processing on all changesets unknown locally
437 432 #
438 433 # * if <nh> is public on the remote, it won't be affected by obsolescence
439 434 # markers and a new head is created
440 435
441 436 # define various utilities and containers
442 437 repo = pushop.repo
443 438 unfi = repo.unfiltered()
444 439 tonode = unfi.changelog.node
445 440 torev = unfi.changelog.nodemap.get
446 441 public = phases.public
447 442 getphase = unfi._phasecache.phase
448 443 ispublic = (lambda r: getphase(unfi, r) == public)
449 444 ispushed = (lambda n: torev(n) in futurecommon)
450 445 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
451 446 successorsmarkers = unfi.obsstore.successors
452 447 newhs = set() # final set of new heads
453 448 discarded = set() # new head of fully replaced branch
454 449
455 450 localcandidate = set() # candidate heads known locally
456 451 unknownheads = set() # candidate heads unknown locally
457 452 for h in candidate_newhs:
458 453 if h in unfi:
459 454 localcandidate.add(h)
460 455 else:
461 456 if successorsmarkers.get(h) is not None:
462 457 msg = ('checkheads: remote head unknown locally has'
463 458 ' local marker: %s\n')
464 459 repo.ui.debug(msg % hex(h))
465 460 unknownheads.add(h)
466 461
467 462 # fast path the simple case
468 463 if len(localcandidate) == 1:
469 464 return unknownheads | set(candidate_newhs), set()
470 465
471 466 # actually process branch replacement
472 467 while localcandidate:
473 468 nh = localcandidate.pop()
474 469 # run this check early to skip the evaluation of the whole branch
475 470 if (torev(nh) in futurecommon or ispublic(torev(nh))):
476 471 newhs.add(nh)
477 472 continue
478 473
479 474 # Get all revs/nodes on the branch exclusive to this head
480 475 # (already filtered heads are "ignored")
481 476 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
482 477 nh, localcandidate, newhs)
483 478 branchnodes = [tonode(r) for r in branchrevs]
484 479
485 480 # The branch won't be hidden on the remote if
486 481 # * any part of it is public,
487 482 # * any part of it is considered part of the result by previous logic,
488 483 # * we have no markers to push to obsolete it.
489 484 if (any(ispublic(r) for r in branchrevs)
490 485 or any(torev(n) in futurecommon for n in branchnodes)
491 486 or any(not hasoutmarker(n) for n in branchnodes)):
492 487 newhs.add(nh)
493 488 else:
494 489 # note: there is a corner case if there is a merge in the branch.
495 490 # we might end up with -more- heads. However, these heads are not
496 491 # "added" by the push, but more by the "removal" on the remote so I
497 492 # think is a okay to ignore them,
498 493 discarded.add(nh)
499 494 newhs |= unknownheads
500 495 return newhs, discarded
501 496
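Distilled, the keep-or-discard decision above says: a local candidate head survives unless every changeset exclusive to it is non-public, not already common, and covered by a marker that will be pushed. A toy version with hypothetical predicates:

    def keep_head(branchnodes, ispublic, iscommon, hasoutmarker):
        return (any(ispublic(n) for n in branchnodes)
                or any(iscommon(n) for n in branchnodes)
                or any(not hasoutmarker(n) for n in branchnodes))

    # node 2 has no outgoing marker, so the head cannot be hidden remotely
    assert keep_head([1, 2], lambda n: False, lambda n: False,
                     lambda n: n == 1)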
502 497 def pushingmarkerfor(obsstore, ispushed, node):
503 498 """true if some markers are to be pushed for node
504 499
505 500 We cannot just look into the pushed obsmarkers from the pushop because
506 501 discovery might have filtered relevant markers. In addition, listing all
507 502 markers relevant to all changesets in the pushed set would be too expensive
508 503 (O(len(repo)))
509 504
510 505 (note: there are caching opportunities in this function, but it would
511 506 require a two-dimensional stack.)
512 507 """
513 508 successorsmarkers = obsstore.successors
514 509 stack = [node]
515 510 seen = set(stack)
516 511 while stack:
517 512 current = stack.pop()
518 513 if ispushed(current):
519 514 return True
520 515 markers = successorsmarkers.get(current, ())
521 516 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
522 517 for m in markers:
523 518 nexts = m[1] # successors
524 519 if not nexts: # this is a prune marker
525 520 nexts = m[5] or () # parents
526 521 for n in nexts:
527 522 if n not in seen:
528 523 seen.add(n)
529 524 stack.append(n)
530 525 return False
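The loop above is a standard iterative depth-first search over the obsolescence-marker graph, with the seen set guarding against cycles; prune markers redirect the walk to a changeset's parents instead of its successors. The same traversal skeleton on a plain adjacency dict (generic names, no Mercurial types):

    def reaches(graph, start, ispushed):
        """True if any node reachable from start satisfies ispushed."""
        stack = [start]
        seen = set(stack)
        while stack:
            current = stack.pop()
            if ispushed(current):
                return True
            for nxt in graph.get(current, ()):
                if nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
        return False

    assert reaches({'a': ['b'], 'b': ['c']}, 'a', lambda n: n == 'c')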