discovery: also use lists for the returns of '_oldheadssummary'...
marmoute
r32671:81cbfaea default
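
This patch converts the containers built in '_oldheadssummary' from sets to lists, keeping its return type aligned with '_headssummary': both now map each branch to a (remoteheads, newheads, unsyncedheads) triple of lists, which is the shape 'checkheads' consumes. A minimal standalone sketch of that shared contract (hypothetical byte-string nodes, not Mercurial's real objects):

    # Sketch of the triple '_oldheadssummary' returns after this change:
    # lists throughout, matching '_headssummary'.
    def oldheadssummary_sketch(remoteheads, knownnodes):
        oldheads = [h for h in remoteheads if h in knownnodes]
        return {None: (oldheads, [], [])}

    summary = oldheadssummary_sketch([b'a', b'b'], {b'a'})
    # List entries allow the in-place update '_headssummary' performs
    # ('headssum[branch][1][:] = newheads'); sets would not.
    summary[None][1][:] = [b'a', b'c']
    print(summary)  # {None: ([b'a'], [b'a', b'c'], [])}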
@@ -1,519 +1,519 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
29 29 def findcommonincoming(repo, remote, heads=None, force=False):
30 30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 31 subset of nodes between repo and remote.
32 32
33 33 "common" is a list of (at least) the heads of the common subset.
34 34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 35 locally. If remote does not support getbundle, this actually is a list of
36 36 roots of the nodes that would be incoming, to be supplied to
37 37 changegroupsubset. No code except for pull should be relying on this fact
38 38 any longer.
39 39 "heads" is either the supplied heads, or else the remote's heads.
40 40
41 41 If you pass heads and they are all known locally, the response lists just
42 42 these heads in "common" and in "heads".
43 43
44 44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 45 extensions a good hook into outgoing.
46 46 """
47 47
48 48 if not remote.capable('getbundle'):
49 49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50 50
51 51 if heads:
52 52 allknown = True
53 53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 54 for h in heads:
55 55 if not knownnode(h):
56 56 allknown = False
57 57 break
58 58 if allknown:
59 59 return (heads, False, heads)
60 60
61 61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 62 abortwhenunrelated=not force)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74 excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force)
145 145 og.commonheads, _any, _hds = commoninc
146 146
147 147 # compute outgoing
148 148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 149 if not mayexclude:
150 150 og.missingheads = onlyheads or repo.heads()
151 151 elif onlyheads is None:
152 152 # use visible heads as it should be cached
153 153 og.missingheads = repo.filtered("served").heads()
154 154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 155 else:
156 156 # compute common, missing and exclude secret stuff
157 157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 158 og._common, allmissing = sets
159 159 og._missing = missing = []
160 160 og.excluded = excluded = []
161 161 for node in allmissing:
162 162 ctx = repo[node]
163 163 if ctx.phase() >= phases.secret or ctx.extinct():
164 164 excluded.append(node)
165 165 else:
166 166 missing.append(node)
167 167 if len(missing) == len(allmissing):
168 168 missingheads = onlyheads
169 169 else: # update missing heads
170 170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 171 og.missingheads = missingheads
172 172 if portable:
173 173 # recompute common and missingheads as if -r<rev> had been given for
174 174 # each head of missing, and --base <rev> for each head of the proper
175 175 # ancestors of missing
176 176 og._computecommonmissing()
177 177 cl = repo.changelog
178 178 missingrevs = set(cl.rev(n) for n in og._missing)
179 179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 180 commonheads = set(og.commonheads)
181 181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 182
183 183 return og
184 184
185 185 def _headssummary(repo, remote, outgoing):
186 186 """compute a summary of branch and heads status before and after push
187 187
188 188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
189 189
190 190 - branch: the branch name
191 191 - remoteheads: the list of remote heads known locally
192 192 None if the branch is new
193 193 - newheads: the new remote heads (known locally) with outgoing pushed
194 194 - unsyncedheads: the list of remote heads unknown locally.
195 195 """
196 196 cl = repo.changelog
197 197 headssum = {}
198 198 # A. Create set of branches involved in the push.
199 199 branches = set(repo[n].branch() for n in outgoing.missing)
200 200 remotemap = remote.branchmap()
201 201 newbranches = branches - set(remotemap)
202 202 branches.difference_update(newbranches)
203 203
204 204 # B. Register remote heads
205 205 remotebranches = set()
206 206 for branch, heads in remote.branchmap().iteritems():
207 207 remotebranches.add(branch)
208 208 known = []
209 209 unsynced = []
210 210 knownnode = cl.hasnode # do not use nodemap until it is filtered
211 211 for h in heads:
212 212 if knownnode(h):
213 213 known.append(h)
214 214 else:
215 215 unsynced.append(h)
216 216 headssum[branch] = (known, list(known), unsynced)
217 217 # C. Add new branch data
218 218 missingctx = list(repo[n] for n in outgoing.missing)
219 219 touchedbranches = set()
220 220 for ctx in missingctx:
221 221 branch = ctx.branch()
222 222 touchedbranches.add(branch)
223 223 if branch not in headssum:
224 224 headssum[branch] = (None, [], [])
225 225
226 226 # D. Drop data about untouched branches:
227 227 for branch in remotebranches - touchedbranches:
228 228 del headssum[branch]
229 229
230 230 # E. Update newmap with outgoing changes.
231 231 # This will possibly add new heads and remove existing ones.
232 232 newmap = branchmap.branchcache((branch, heads[1])
233 233 for branch, heads in headssum.iteritems()
234 234 if heads[0] is not None)
235 235 newmap.update(repo, (ctx.rev() for ctx in missingctx))
236 236 for branch, newheads in newmap.iteritems():
237 237 headssum[branch][1][:] = newheads
238 238 return headssum
239 239
240 240 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
241 241 """Compute branchmapsummary for repo without branchmap support"""
242 242
243 243 # 1-4b. old servers: Check for new topological heads.
244 244 # Construct {old,new}map with branch = None (topological branch).
245 245 # (code based on update)
246 246 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
247 oldheads = set(h for h in remoteheads if knownnode(h))
247 oldheads = list(h for h in remoteheads if knownnode(h))
248 248 # all nodes in outgoing.missing are children of either:
249 249 # - an element of oldheads
250 250 # - another element of outgoing.missing
251 251 # - nullrev
252 252 # This explains why the new heads are very simple to compute.
253 253 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
254 254 newheads = list(c.node() for c in r)
255 255 # set some unsynced head to issue the "unsynced changes" warning
256 256 if inc:
257 unsynced = {None}
257 unsynced = [None]
258 258 else:
259 unsynced = set()
259 unsynced = []
260 260 return {None: (oldheads, newheads, unsynced)}
261 261
262 262 def _nowarnheads(pushop):
263 263 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
264 264 repo = pushop.repo.unfiltered()
265 265 remote = pushop.remote
266 266 localbookmarks = repo._bookmarks
267 267 remotebookmarks = remote.listkeys('bookmarks')
268 268 bookmarkedheads = set()
269 269
270 270 # internal config: bookmarks.pushing
271 271 newbookmarks = [localbookmarks.expandname(b)
272 272 for b in pushop.ui.configlist('bookmarks', 'pushing')]
273 273
274 274 for bm in localbookmarks:
275 275 rnode = remotebookmarks.get(bm)
276 276 if rnode and rnode in repo:
277 277 lctx, rctx = repo[bm], repo[rnode]
278 278 if bookmarks.validdest(repo, rctx, lctx):
279 279 bookmarkedheads.add(lctx.node())
280 280 else:
281 281 if bm in newbookmarks and bm not in remotebookmarks:
282 282 bookmarkedheads.add(repo[bm].node())
283 283
284 284 return bookmarkedheads
285 285
286 286 def checkheads(pushop):
287 287 """Check that a push won't add any outgoing head
288 288
289 289 raise Abort error and display ui message as needed.
290 290 """
291 291
292 292 repo = pushop.repo.unfiltered()
293 293 remote = pushop.remote
294 294 outgoing = pushop.outgoing
295 295 remoteheads = pushop.remoteheads
296 296 newbranch = pushop.newbranch
297 297 inc = bool(pushop.incoming)
298 298
299 299 # Check for each named branch if we're creating new remote heads.
300 300 # To be a remote head after push, node must be either:
301 301 # - unknown locally
302 302 # - a local outgoing head descended from update
303 303 # - a remote head that's known locally and not
304 304 # ancestral to an outgoing head
305 305 if remoteheads == [nullid]:
306 306 # remote is empty, nothing to check.
307 307 return
308 308
309 309 if remote.capable('branchmap'):
310 310 headssum = _headssummary(repo, remote, outgoing)
311 311 else:
312 312 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
313 313 newbranches = [branch for branch, heads in headssum.iteritems()
314 314 if heads[0] is None]
315 315 # 1. Check for new branches on the remote.
316 316 if newbranches and not newbranch: # new branch requires --new-branch
317 317 branchnames = ', '.join(sorted(newbranches))
318 318 raise error.Abort(_("push creates new remote branches: %s!")
319 319 % branchnames,
320 320 hint=_("use 'hg push --new-branch' to create"
321 321 " new remote branches"))
322 322
323 323 # 2. Find heads that we need not warn about
324 324 nowarnheads = _nowarnheads(pushop)
325 325
326 326 # 3. Check for new heads.
327 327 # If there are more heads after the push than before, a suitable
328 328 # error message, depending on unsynced status, is displayed.
329 329 errormsg = None
330 330 # If there is no obsstore, allfuturecommon won't be used, so no
331 331 # need to compute it.
332 332 if repo.obsstore:
333 333 allmissing = set(outgoing.missing)
334 334 cctx = repo.set('%ld', outgoing.common)
335 335 allfuturecommon = set(c.node() for c in cctx)
336 336 allfuturecommon.update(allmissing)
337 337 for branch, heads in sorted(headssum.iteritems()):
338 338 remoteheads, newheads, unsyncedheads = heads
339 339 candidate_newhs = set(newheads)
340 340 # add unsynced data
341 341 if remoteheads is None:
342 342 oldhs = set()
343 343 else:
344 344 oldhs = set(remoteheads)
345 345 oldhs.update(unsyncedheads)
346 346 candidate_newhs.update(unsyncedheads)
347 347 dhs = None # delta heads, the new heads on branch
348 348 if not repo.obsstore:
349 349 discardedheads = set()
350 350 newhs = candidate_newhs
351 351 else:
352 352 newhs, discardedheads = _postprocessobsolete(pushop,
353 353 allfuturecommon,
354 354 candidate_newhs)
355 355 unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
356 356 if unsynced:
357 357 if None in unsynced:
358 358 # old remote, no heads data
359 359 heads = None
360 360 elif len(unsynced) <= 4 or repo.ui.verbose:
361 361 heads = ' '.join(short(h) for h in unsynced)
362 362 else:
363 363 heads = (' '.join(short(h) for h in unsynced[:4]) +
364 364 ' ' + _("and %s others") % (len(unsynced) - 4))
365 365 if heads is None:
366 366 repo.ui.status(_("remote has heads that are "
367 367 "not known locally\n"))
368 368 elif branch is None:
369 369 repo.ui.status(_("remote has heads that are "
370 370 "not known locally: %s\n") % heads)
371 371 else:
372 372 repo.ui.status(_("remote has heads on branch '%s' that are "
373 373 "not known locally: %s\n") % (branch, heads))
374 374 if remoteheads is None:
375 375 if len(newhs) > 1:
376 376 dhs = list(newhs)
377 377 if errormsg is None:
378 378 errormsg = (_("push creates new branch '%s' "
379 379 "with multiple heads") % (branch))
380 380 hint = _("merge or"
381 381 " see 'hg help push' for details about"
382 382 " pushing new heads")
383 383 elif len(newhs) > len(oldhs):
384 384 # remove bookmarked or existing remote heads from the new heads list
385 385 dhs = sorted(newhs - nowarnheads - oldhs)
386 386 if dhs:
387 387 if errormsg is None:
388 388 if branch not in ('default', None):
389 389 errormsg = _("push creates new remote head %s "
390 390 "on branch '%s'!") % (short(dhs[0]), branch)
391 391 elif repo[dhs[0]].bookmarks():
392 392 errormsg = _("push creates new remote head %s "
393 393 "with bookmark '%s'!") % (
394 394 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
395 395 else:
396 396 errormsg = _("push creates new remote head %s!"
397 397 ) % short(dhs[0])
398 398 if unsyncedheads:
399 399 hint = _("pull and merge or"
400 400 " see 'hg help push' for details about"
401 401 " pushing new heads")
402 402 else:
403 403 hint = _("merge or"
404 404 " see 'hg help push' for details about"
405 405 " pushing new heads")
406 406 if branch is None:
407 407 repo.ui.note(_("new remote heads:\n"))
408 408 else:
409 409 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
410 410 for h in dhs:
411 411 repo.ui.note((" %s\n") % short(h))
412 412 if errormsg:
413 413 raise error.Abort(errormsg, hint=hint)
414 414
415 415 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
416 416 """post process the list of new heads with obsolescence information
417 417
418 418 Exists as a sub-function to contain the complexity and allow extensions to
419 419 experiment with smarter logic.
420 420
421 421 Returns (newheads, discarded_heads) tuple
422 422 """
423 423 # known issue
424 424 #
425 425 # * We "silently" skip processing on all changesets unknown locally
426 426 #
427 427 # * if <nh> is public on the remote, it won't be affected by obsolete
428 428 # markers and a new head is created
429 429
430 430 # define various utilities and containers
431 431 repo = pushop.repo
432 432 unfi = repo.unfiltered()
433 433 tonode = unfi.changelog.node
434 434 torev = unfi.changelog.rev
435 435 public = phases.public
436 436 getphase = unfi._phasecache.phase
437 437 ispublic = (lambda r: getphase(unfi, r) == public)
438 438 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
439 439 futurecommon)
440 440 successorsmarkers = unfi.obsstore.successors
441 441 newhs = set() # final set of new heads
442 442 discarded = set() # new head of fully replaced branch
443 443
444 444 localcandidate = set() # candidate heads known locally
445 445 unknownheads = set() # candidate heads unknown locally
446 446 for h in candidate_newhs:
447 447 if h in unfi:
448 448 localcandidate.add(h)
449 449 else:
450 450 if successorsmarkers.get(h) is not None:
451 451 msg = ('checkheads: remote head unknown locally has'
452 452 ' local marker: %s\n')
453 453 repo.ui.debug(msg % hex(h))
454 454 unknownheads.add(h)
455 455
456 456 # fast path the simple case
457 457 if len(localcandidate) == 1:
458 458 return unknownheads | set(candidate_newhs), set()
459 459
460 460 # actually process branch replacement
461 461 while localcandidate:
462 462 nh = localcandidate.pop()
463 463 # run this check early to skip the evaluation of the whole branch
464 464 if (nh in futurecommon or ispublic(torev(nh))):
465 465 newhs.add(nh)
466 466 continue
467 467
468 468 # Get all revs/nodes on the branch exclusive to this head
469 469 # (already filtered heads are "ignored")
470 470 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
471 471 nh, localcandidate, newhs)
472 472 branchnodes = [tonode(r) for r in branchrevs]
473 473
474 474 # The branch won't be hidden on the remote if
475 475 # * any part of it is public,
476 476 # * any part of it is considered part of the result by previous logic,
477 477 # * any part of it has no marker to push to obsolete it.
478 478 if (any(ispublic(r) for r in branchrevs)
479 479 or any(n in futurecommon for n in branchnodes)
480 480 or any(not hasoutmarker(n) for n in branchnodes)):
481 481 newhs.add(nh)
482 482 else:
483 483 # note: there is a corner case if there is a merge in the branch.
484 484 # we might end up with -more- heads. However, these heads are not
485 485 # "added" by the push, but more by the "removal" on the remote so I
486 486 # think is a okay to ignore them,
487 487 discarded.add(nh)
488 488 newhs |= unknownheads
489 489 return newhs, discarded
490 490
491 491 def pushingmarkerfor(obsstore, pushset, node):
492 492 """true if some markers are to be pushed for node
493 493
494 494 We cannot just look into the pushed obsmarkers from the pushop because
495 495 discovery might have filtered relevant markers. In addition listing all
496 496 markers relevant to all changesets in the pushed set would be too expensive
497 497 (O(len(repo)))
498 498
499 499 (note: there are caching opportunities in this function, but it would require
500 500 a two-dimensional stack.)
501 501 """
502 502 successorsmarkers = obsstore.successors
503 503 stack = [node]
504 504 seen = set(stack)
505 505 while stack:
506 506 current = stack.pop()
507 507 if current in pushset:
508 508 return True
509 509 markers = successorsmarkers.get(current, ())
510 510 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
511 511 for m in markers:
512 512 nexts = m[1] # successors
513 513 if not nexts: # this is a prune marker
514 514 nexts = m[5] or () # parents
515 515 for n in nexts:
516 516 if n not in seen:
517 517 seen.add(n)
518 518 stack.append(n)
519 519 return False
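
The marker walk in 'pushingmarkerfor' above is a plain depth-first traversal over the successors graph, falling back to parents for prune markers. A self-contained toy version, with an ordinary dict standing in for 'obsstore.successors' and marker tuples following the field layout noted in the code ('prec', 'succs', 'flag', 'meta', 'date', 'parents'):

    def pushingmarkerfor_sketch(successors, pushset, node):
        # Walk successors (or parents, for prune markers) until we either
        # reach a node in the pushed set or exhaust the graph.
        stack = [node]
        seen = set(stack)
        while stack:
            current = stack.pop()
            if current in pushset:
                return True
            for m in successors.get(current, ()):
                nexts = m[1] or m[5] or ()  # successors, else parents (prune)
                for n in nexts:
                    if n not in seen:
                        seen.add(n)
                        stack.append(n)
        return False

    # 'a' was rewritten as 'b'; 'b' was then pruned (parents ('p',)).
    markers = {
        'a': [('a', ('b',), 0, (), None, None)],
        'b': [('b', (), 0, (), None, ('p',))],
    }
    print(pushingmarkerfor_sketch(markers, {'p'}, 'a'))  # True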