headsummary: expose the 'discardedheads' set in the headssummary...
marmoute
r32708:90cb4ec8 default
@@ -1,522 +1,526 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
29 29 def findcommonincoming(repo, remote, heads=None, force=False):
30 30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 31 subset of nodes between repo and remote.
32 32
33 33 "common" is a list of (at least) the heads of the common subset.
34 34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 35 locally. If remote does not support getbundle, this actually is a list of
36 36 roots of the nodes that would be incoming, to be supplied to
37 37 changegroupsubset. No code except for pull should be relying on this fact
38 38 any longer.
39 39 "heads" is either the supplied heads, or else the remote's heads.
40 40
41 41 If you pass heads and they are all known locally, the response lists just
42 42 these heads in "common" and in "heads".
43 43
44 44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 45 extensions a good hook into outgoing.
46 46 """
47 47
48 48 if not remote.capable('getbundle'):
49 49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50 50
51 51 if heads:
52 52 allknown = True
53 53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 54 for h in heads:
55 55 if not knownnode(h):
56 56 allknown = False
57 57 break
58 58 if allknown:
59 59 return (heads, False, heads)
60 60
61 61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 62 abortwhenunrelated=not force)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
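For orientation, here is a minimal usage sketch of the tuple documented above. It assumes 'repo' and 'remote' are an existing local repository and a peer with the 'getbundle' capability; neither name is defined by this patch.

    # hypothetical call site; repo and remote are assumed to exist
    common, anyincoming, heads = findcommonincoming(repo, remote)
    if anyincoming:
        # at least one remote node is missing locally; 'common' holds
        # (at least) the heads of the shared subset
        repo.ui.status('remote has changesets missing locally\n')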
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74 excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
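Note that the sets above are computed lazily: the first access to 'common' or 'missing' runs _computecommonmissing() via util.propertycache and caches the result. A small sketch, assuming an existing 'repo':

    # defaults: commonheads becomes [nullid], missingheads the repo heads
    og = outgoing(repo)
    nodes = og.missing        # first access computes and caches the set
    assert og._missing is not None   # populated as a side effect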
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force)
145 145 og.commonheads, _any, _hds = commoninc
146 146
147 147 # compute outgoing
148 148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 149 if not mayexclude:
150 150 og.missingheads = onlyheads or repo.heads()
151 151 elif onlyheads is None:
152 152 # use visible heads as it should be cached
153 153 og.missingheads = repo.filtered("served").heads()
154 154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 155 else:
156 156 # compute common, missing and exclude secret stuff
157 157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 158 og._common, allmissing = sets
159 159 og._missing = missing = []
160 160 og.excluded = excluded = []
161 161 for node in allmissing:
162 162 ctx = repo[node]
163 163 if ctx.phase() >= phases.secret or ctx.extinct():
164 164 excluded.append(node)
165 165 else:
166 166 missing.append(node)
167 167 if len(missing) == len(allmissing):
168 168 missingheads = onlyheads
169 169 else: # update missing heads
170 170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 171 og.missingheads = missingheads
172 172 if portable:
173 173 # recompute common and missingheads as if -r<rev> had been given for
174 174 # each head of missing, and --base <rev> for each head of the proper
175 175 # ancestors of missing
176 176 og._computecommonmissing()
177 177 cl = repo.changelog
178 178 missingrevs = set(cl.rev(n) for n in og._missing)
179 179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 180 commonheads = set(og.commonheads)
181 181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 182
183 183 return og
184 184
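A sketch of consuming the returned instance; 'other' is assumed to be a peer object and the loop body is purely illustrative:

    og = findcommonoutgoing(repo, other)
    # og.missing is lazy; when exclusion applies, secret/extinct
    # changesets end up in og.excluded rather than og.missing
    for node in og.missing:
        repo.ui.debug('would push: %s\n' % hex(node))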
185 185 def _headssummary(pushop):
186 186 """compute a summary of branch and heads status before and after push
187 187
188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
188 return {'branch': ([remoteheads], [newheads],
189 [unsyncedheads], [discardedheads])} mapping
189 190
190 - branch: the branch name
191 - branch: the branch name,
191 192 - remoteheads: the list of remote heads known locally
192 None if the branch is new
193 - newheads: the new remote heads (known locally) with outgoing pushed
194 - unsyncedheads: the list of remote heads unknown locally.
193 None if the branch is new,
194 - newheads: the new remote heads (known locally) with outgoing pushed,
195 - unsyncedheads: the list of remote heads unknown locally,
196 - discardedheads: the list of heads made obsolete by the push.
195 197 """
196 198 repo = pushop.repo.unfiltered()
197 199 remote = pushop.remote
198 200 outgoing = pushop.outgoing
199 201 cl = repo.changelog
200 202 headssum = {}
201 203 # A. Create set of branches involved in the push.
202 204 branches = set(repo[n].branch() for n in outgoing.missing)
203 205 remotemap = remote.branchmap()
204 206 newbranches = branches - set(remotemap)
205 207 branches.difference_update(newbranches)
206 208
207 209 # A. register remote heads
208 210 remotebranches = set()
209 211 for branch, heads in remote.branchmap().iteritems():
210 212 remotebranches.add(branch)
211 213 known = []
212 214 unsynced = []
213 215 knownnode = cl.hasnode # do not use nodemap until it is filtered
214 216 for h in heads:
215 217 if knownnode(h):
216 218 known.append(h)
217 219 else:
218 220 unsynced.append(h)
219 221 headssum[branch] = (known, list(known), unsynced)
220 222 # B. add new branch data
221 223 missingctx = list(repo[n] for n in outgoing.missing)
222 224 touchedbranches = set()
223 225 for ctx in missingctx:
224 226 branch = ctx.branch()
225 227 touchedbranches.add(branch)
226 228 if branch not in headssum:
227 229 headssum[branch] = (None, [], [])
228 230
229 231 # C. drop data about untouched branches:
230 232 for branch in remotebranches - touchedbranches:
231 233 del headssum[branch]
232 234
233 235 # D. Update newmap with outgoing changes.
234 236 # This will possibly add new heads and remove existing ones.
235 237 newmap = branchmap.branchcache((branch, heads[1])
236 238 for branch, heads in headssum.iteritems()
237 239 if heads[0] is not None)
238 240 newmap.update(repo, (ctx.rev() for ctx in missingctx))
239 241 for branch, newheads in newmap.iteritems():
240 242 headssum[branch][1][:] = newheads
241 243 for branch, items in headssum.iteritems():
242 244 for l in items:
243 245 if l is not None:
244 246 l.sort()
247 headssum[branch] = items + ([],)
248
245 249 # If there is no obsstore, no post processing is needed.
246 250 if repo.obsstore:
247 251 allmissing = set(outgoing.missing)
248 252 cctx = repo.set('%ld', outgoing.common)
249 253 allfuturecommon = set(c.node() for c in cctx)
250 254 allfuturecommon.update(allmissing)
251 255 for branch, heads in sorted(headssum.iteritems()):
252 remoteheads, newheads, unsyncedheads = heads
256 remoteheads, newheads, unsyncedheads, placeholder = heads
253 257 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
254 newheads = sorted(result[0])
255 headssum[branch] = (remoteheads, newheads, unsyncedheads)
258 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
259 sorted(result[1]))
256 260 return headssum
257 261
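With this change every value in the returned mapping is a 4-tuple, so consumers must unpack the extra discardedheads slot. A toy illustration of the new shape (head values are invented placeholders, not real nodes):

    headssum = {'default': (['n1'], ['n1', 'n2'], [], ['n3'])}
    for branch, heads in headssum.items():
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # discardedheads: heads the pushed obsmarkers will hide remotely;
        # always [] when the repo has no obsstore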
258 262 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
259 263 """Compute branchmapsummary for repo without branchmap support"""
260 264
261 265 # 1-4b. old servers: Check for new topological heads.
262 266 # Construct {old,new}map with branch = None (topological branch).
263 267 # (code based on update)
264 268 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
265 269 oldheads = sorted(h for h in remoteheads if knownnode(h))
266 270 # all nodes in outgoing.missing are children of either:
267 271 # - an element of oldheads
268 272 # - another element of outgoing.missing
269 273 # - nullrev
270 274 # This explains why the new heads are very simple to compute.
271 275 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
272 276 newheads = sorted(c.node() for c in r)
273 277 # set some unsynced head to issue the "unsynced changes" warning
274 278 if inc:
275 279 unsynced = [None]
276 280 else:
277 281 unsynced = []
278 return {None: (oldheads, newheads, unsynced)}
282 return {None: (oldheads, newheads, unsynced, [])}
279 283
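On this old-server path the summary collapses to a single topological 'branch' keyed by None, and the new discardedheads slot stays empty because obsolescence post-processing never runs here. A sketch, with repo, remoteheads and outgoing assumed to come from a real push:

    summary = _oldheadssummary(repo, remoteheads, outgoing, inc=True)
    oldheads, newheads, unsynced, discarded = summary[None]
    assert unsynced == [None]  # sentinel: old remote, no head data
    assert discarded == []     # no obsmarker post-processing on this path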
280 284 def _nowarnheads(pushop):
281 285 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
282 286 repo = pushop.repo.unfiltered()
283 287 remote = pushop.remote
284 288 localbookmarks = repo._bookmarks
285 289 remotebookmarks = remote.listkeys('bookmarks')
286 290 bookmarkedheads = set()
287 291
288 292 # internal config: bookmarks.pushing
289 293 newbookmarks = [localbookmarks.expandname(b)
290 294 for b in pushop.ui.configlist('bookmarks', 'pushing')]
291 295
292 296 for bm in localbookmarks:
293 297 rnode = remotebookmarks.get(bm)
294 298 if rnode and rnode in repo:
295 299 lctx, rctx = repo[bm], repo[rnode]
296 300 if bookmarks.validdest(repo, rctx, lctx):
297 301 bookmarkedheads.add(lctx.node())
298 302 else:
299 303 if bm in newbookmarks and bm not in remotebookmarks:
300 304 bookmarkedheads.add(repo[bm].node())
301 305
302 306 return bookmarkedheads
303 307
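The set returned above is subtracted from the candidate new heads in step 3 of checkheads below, so bookmarked heads never trigger the new-head warning. A toy sketch of that subtraction (all values invented):

    nowarnheads = {b'\x01' * 20}
    newhs = {b'\x01' * 20, b'\x02' * 20}
    oldhs = set()
    dhs = sorted(newhs - nowarnheads - oldhs)  # as in checkheads
    assert dhs == [b'\x02' * 20]  # only the unbookmarked head remains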
304 308 def checkheads(pushop):
305 309 """Check that a push won't add any outgoing head
306 310
307 311 raise Abort error and display ui message as needed.
308 312 """
309 313
310 314 repo = pushop.repo.unfiltered()
311 315 remote = pushop.remote
312 316 outgoing = pushop.outgoing
313 317 remoteheads = pushop.remoteheads
314 318 newbranch = pushop.newbranch
315 319 inc = bool(pushop.incoming)
316 320
317 321 # Check for each named branch if we're creating new remote heads.
318 322 # To be a remote head after push, node must be either:
319 323 # - unknown locally
320 324 # - a local outgoing head descended from update
321 325 # - a remote head that's known locally and not
322 326 # ancestral to an outgoing head
323 327 if remoteheads == [nullid]:
324 328 # remote is empty, nothing to check.
325 329 return
326 330
327 331 if remote.capable('branchmap'):
328 332 headssum = _headssummary(pushop)
329 333 else:
330 334 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
331 335 newbranches = [branch for branch, heads in headssum.iteritems()
332 336 if heads[0] is None]
333 337 # 1. Check for new branches on the remote.
334 338 if newbranches and not newbranch: # new branch requires --new-branch
335 339 branchnames = ', '.join(sorted(newbranches))
336 340 raise error.Abort(_("push creates new remote branches: %s!")
337 341 % branchnames,
338 342 hint=_("use 'hg push --new-branch' to create"
339 343 " new remote branches"))
340 344
341 345 # 2. Find heads that we need not warn about
342 346 nowarnheads = _nowarnheads(pushop)
343 347
344 348 # 3. Check for new heads.
345 349 # If there are more heads after the push than before, a suitable
346 350 # error message, depending on unsynced status, is displayed.
347 351 errormsg = None
348 352 for branch, heads in sorted(headssum.iteritems()):
349 remoteheads, newheads, unsyncedheads = heads
353 remoteheads, newheads, unsyncedheads, discardedheads = heads
350 354 # add unsynced data
351 355 if remoteheads is None:
352 356 oldhs = set()
353 357 else:
354 358 oldhs = set(remoteheads)
355 359 oldhs.update(unsyncedheads)
356 360 dhs = None # delta heads, the new heads on branch
357 361 newhs = set(newheads)
358 362 newhs.update(unsyncedheads)
359 363 if unsyncedheads:
360 364 if None in unsyncedheads:
361 365 # old remote, no heads data
362 366 heads = None
363 367 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
364 368 heads = ' '.join(short(h) for h in unsyncedheads)
365 369 else:
366 370 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
367 371 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
368 372 if heads is None:
369 373 repo.ui.status(_("remote has heads that are "
370 374 "not known locally\n"))
371 375 elif branch is None:
372 376 repo.ui.status(_("remote has heads that are "
373 377 "not known locally: %s\n") % heads)
374 378 else:
375 379 repo.ui.status(_("remote has heads on branch '%s' that are "
376 380 "not known locally: %s\n") % (branch, heads))
377 381 if remoteheads is None:
378 382 if len(newhs) > 1:
379 383 dhs = list(newhs)
380 384 if errormsg is None:
381 385 errormsg = (_("push creates new branch '%s' "
382 386 "with multiple heads") % (branch))
383 387 hint = _("merge or"
384 388 " see 'hg help push' for details about"
385 389 " pushing new heads")
386 390 elif len(newhs) > len(oldhs):
387 391 # remove bookmarked or existing remote heads from the new heads list
388 392 dhs = sorted(newhs - nowarnheads - oldhs)
389 393 if dhs:
390 394 if errormsg is None:
391 395 if branch not in ('default', None):
392 396 errormsg = _("push creates new remote head %s "
393 397 "on branch '%s'!") % (short(dhs[0]), branch)
394 398 elif repo[dhs[0]].bookmarks():
395 399 errormsg = _("push creates new remote head %s "
396 400 "with bookmark '%s'!") % (
397 401 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
398 402 else:
399 403 errormsg = _("push creates new remote head %s!"
400 404 ) % short(dhs[0])
401 405 if unsyncedheads:
402 406 hint = _("pull and merge or"
403 407 " see 'hg help push' for details about"
404 408 " pushing new heads")
405 409 else:
406 410 hint = _("merge or"
407 411 " see 'hg help push' for details about"
408 412 " pushing new heads")
409 413 if branch is None:
410 414 repo.ui.note(_("new remote heads:\n"))
411 415 else:
412 416 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
413 417 for h in dhs:
414 418 repo.ui.note((" %s\n") % short(h))
415 419 if errormsg:
416 420 raise error.Abort(errormsg, hint=hint)
417 421
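The caller's contract, sketched below: checkheads returns None when the push may proceed and raises error.Abort (with a hint) otherwise. 'pushop' is assumed to come from a real push operation:

    try:
        checkheads(pushop)
    except error.Abort as exc:
        # exchange.push would normally let this propagate to the user
        pushop.ui.warn('push rejected: %s\n' % exc)
        raise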
418 422 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
419 423 """post process the list of new heads with obsolescence information
420 424
421 425 Exists as a sub-function to contain the complexity and allow extensions to
422 426 experiment with smarter logic.
423 427
424 428 Returns (newheads, discarded_heads) tuple
425 429 """
426 430 # known issue
427 431 #
428 432 # * We "silently" skip processing on all changesets unknown locally
429 433 #
430 434 # * if <nh> is public on the remote, it won't be affected by obsolete
431 435 # markers and a new head is created
432 436
433 437 # define various utilities and containers
434 438 repo = pushop.repo
435 439 unfi = repo.unfiltered()
436 440 tonode = unfi.changelog.node
437 441 torev = unfi.changelog.rev
438 442 public = phases.public
439 443 getphase = unfi._phasecache.phase
440 444 ispublic = (lambda r: getphase(unfi, r) == public)
441 445 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
442 446 futurecommon)
443 447 successorsmarkers = unfi.obsstore.successors
444 448 newhs = set() # final set of new heads
445 449 discarded = set() # new head of fully replaced branch
446 450
447 451 localcandidate = set() # candidate heads known locally
448 452 unknownheads = set() # candidate heads unknown locally
449 453 for h in candidate_newhs:
450 454 if h in unfi:
451 455 localcandidate.add(h)
452 456 else:
453 457 if successorsmarkers.get(h) is not None:
454 458 msg = ('checkheads: remote head unknown locally has'
455 459 ' local marker: %s\n')
456 460 repo.ui.debug(msg % hex(h))
457 461 unknownheads.add(h)
458 462
459 463 # fast path the simple case
460 464 if len(localcandidate) == 1:
461 465 return unknownheads | set(candidate_newhs), set()
462 466
463 467 # actually process branch replacement
464 468 while localcandidate:
465 469 nh = localcandidate.pop()
466 470 # run this check early to skip the evaluation of the whole branch
467 471 if (nh in futurecommon or ispublic(torev(nh))):
468 472 newhs.add(nh)
469 473 continue
470 474
471 475 # Get all revs/nodes on the branch exclusive to this head
472 476 # (already filtered heads are "ignored")
473 477 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
474 478 nh, localcandidate, newhs)
475 479 branchnodes = [tonode(r) for r in branchrevs]
476 480
477 481 # The branch won't be hidden on the remote if
478 482 # * any part of it is public,
479 483 # * any part of it is considered part of the result by previous logic,
480 484 # * if we have no markers to push to obsolete it.
481 485 if (any(ispublic(r) for r in branchrevs)
482 486 or any(n in futurecommon for n in branchnodes)
483 487 or any(not hasoutmarker(n) for n in branchnodes)):
484 488 newhs.add(nh)
485 489 else:
486 490 # note: there is a corner case if there is a merge in the branch.
487 491 # we might end up with -more- heads. However, these heads are not
488 492 # "added" by the push, but more by the "removal" on the remote so I
489 493 # think is a okay to ignore them,
490 494 discarded.add(nh)
491 495 newhs |= unknownheads
492 496 return newhs, discarded
493 497
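A sketch of the returned pair; a head lands in exactly one of the two sets, never both. All names except the function itself are assumed:

    newhs, discarded = _postprocessobsolete(pushop, futurecommon, candidates)
    assert newhs.isdisjoint(discarded)
    # 'discarded' becomes the fourth slot of each _headssummary entry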
494 498 def pushingmarkerfor(obsstore, pushset, node):
495 499 """true if some markers are to be pushed for node
496 500
497 501 We cannot just look into the pushed obsmarkers from the pushop because
498 502 discovery might have filtered relevant markers. In addition, listing all
499 503 markers relevant to all changesets in the pushed set would be too expensive
500 504 (O(len(repo)))
501 505
502 506 (note: there are caching opportunities in this function, but it would
503 507 require a two-dimensional stack.)
504 508 """
505 509 successorsmarkers = obsstore.successors
506 510 stack = [node]
507 511 seen = set(stack)
508 512 while stack:
509 513 current = stack.pop()
510 514 if current in pushset:
511 515 return True
512 516 markers = successorsmarkers.get(current, ())
513 517 # marker fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
514 518 for m in markers:
515 519 nexts = m[1] # successors
516 520 if not nexts: # this is a prune marker
517 521 nexts = m[5] or () # parents
518 522 for n in nexts:
519 523 if n not in seen:
520 524 seen.add(n)
521 525 stack.append(n)
522 526 return False
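To make the traversal concrete, here is a toy successors mapping using the marker layout the loop indexes, plus a hypothetical stand-in for the obsstore (all data invented):

    prec, succ = b'\x01' * 20, b'\x02' * 20
    # marker layout: (prec, succs, flag, meta, date, parents)
    marker = (prec, (succ,), 0, (), (0.0, 0), None)

    class fakeobsstore(object):  # hypothetical stand-in
        successors = {prec: [marker]}

    # the marker chain from prec reaches succ, which is in the push set
    assert pushingmarkerfor(fakeobsstore, {succ}, prec)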