checkheads: perform obsolescence post processing directly in _headssummary...
marmoute - r32707:32c8f98a default
@@ -1,522 +1,522 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
29 29 def findcommonincoming(repo, remote, heads=None, force=False):
30 30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 31 subset of nodes between repo and remote.
32 32
33 33 "common" is a list of (at least) the heads of the common subset.
34 34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 35 locally. If remote does not support getbundle, this actually is a list of
36 36 roots of the nodes that would be incoming, to be supplied to
37 37 changegroupsubset. No code except for pull should be relying on this fact
38 38 any longer.
39 39 "heads" is either the supplied heads, or else the remote's heads.
40 40
41 41 If you pass heads and they are all known locally, the response lists just
42 42 these heads in "common" and in "heads".
43 43
44 44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 45 extensions a good hook into outgoing.
46 46 """
47 47
48 48 if not remote.capable('getbundle'):
49 49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50 50
51 51 if heads:
52 52 allknown = True
53 53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 54 for h in heads:
55 55 if not knownnode(h):
56 56 allknown = False
57 57 break
58 58 if allknown:
59 59 return (heads, False, heads)
60 60
61 61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 62 abortwhenunrelated=not force)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
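For orientation, a rough usage sketch of this API. The repository path and URL below are placeholders, and peer construction details vary across Mercurial versions; this is an illustration, not the canonical invocation.

    # Hypothetical sketch: ask discovery what a remote has that we lack.
    from mercurial import discovery, hg, ui as uimod

    myui = uimod.ui.load()
    repo = hg.repository(myui, '/path/to/local/repo')       # placeholder path
    remote = hg.peer(myui, {}, 'https://example.com/repo')  # placeholder URL

    common, anyinc, heads = discovery.findcommonincoming(repo, remote)
    if anyinc:
        print('remote has changesets missing locally')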
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74     excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
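The common and missing attributes above are lazy: computed on first access, then cached. A minimal standalone sketch of the idea behind util.propertycache (not Mercurial's actual implementation):

    # A non-data descriptor: the first access computes the value and
    # stores it in the instance dict, which shadows the descriptor on
    # every later lookup.
    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            result = self.func(obj)
            obj.__dict__[self.name] = result
            return result

    class demo(object):
        @propertycache
        def expensive(self):
            print('computing once')
            return 42

    d = demo()
    d.expensive  # prints 'computing once' and returns 42
    d.expensive  # cached: no recomputation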
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force)
145 145 og.commonheads, _any, _hds = commoninc
146 146
147 147 # compute outgoing
148 148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 149 if not mayexclude:
150 150 og.missingheads = onlyheads or repo.heads()
151 151 elif onlyheads is None:
152 152 # use visible heads as it should be cached
153 153 og.missingheads = repo.filtered("served").heads()
154 154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 155 else:
156 156 # compute common, missing and exclude secret stuff
157 157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 158 og._common, allmissing = sets
159 159 og._missing = missing = []
160 160 og.excluded = excluded = []
161 161 for node in allmissing:
162 162 ctx = repo[node]
163 163 if ctx.phase() >= phases.secret or ctx.extinct():
164 164 excluded.append(node)
165 165 else:
166 166 missing.append(node)
167 167 if len(missing) == len(allmissing):
168 168 missingheads = onlyheads
169 169 else: # update missing heads
170 170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 171 og.missingheads = missingheads
172 172 if portable:
173 173 # recompute common and missingheads as if -r<rev> had been given for
174 174 # each head of missing, and --base <rev> for each head of the proper
175 175 # ancestors of missing
176 176 og._computecommonmissing()
177 177 cl = repo.changelog
178 178 missingrevs = set(cl.rev(n) for n in og._missing)
179 179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 180 commonheads = set(og.commonheads)
181 181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 182
183 183 return og
184 184
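The else-branch above partitions everything missing into nodes to send and nodes to exclude, based on secret phase or extinctness. A toy rendering of that partition with invented data (plain dicts stand in for changectx objects):

    # Toy stand-in for the exclusion loop in findcommonoutgoing.
    SECRET = 2  # phases.secret is 2 (public=0, draft=1, secret=2)

    allmissing = [
        {'node': 'aaa', 'phase': 1, 'extinct': False},       # sent
        {'node': 'bbb', 'phase': SECRET, 'extinct': False},  # excluded
        {'node': 'ccc', 'phase': 1, 'extinct': True},        # excluded
    ]
    missing, excluded = [], []
    for ctx in allmissing:
        if ctx['phase'] >= SECRET or ctx['extinct']:
            excluded.append(ctx['node'])
        else:
            missing.append(ctx['node'])
    assert missing == ['aaa'] and excluded == ['bbb', 'ccc']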
185 185 def _headssummary(pushop):
186 186 """compute a summary of branch and heads status before and after push
187 187
188 188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
189 189
190 190 - branch: the branch name
191 191 - remoteheads: the list of remote heads known locally
192 192 None if the branch is new
193 193 - newheads: the new remote heads (known locally) with outgoing pushed
194 194 - unsyncedheads: the list of remote heads unknown locally.
195 195 """
196 196 repo = pushop.repo.unfiltered()
197 197 remote = pushop.remote
198 198 outgoing = pushop.outgoing
199 199 cl = repo.changelog
200 200 headssum = {}
201 201 # A. Create set of branches involved in the push.
202 202 branches = set(repo[n].branch() for n in outgoing.missing)
203 203 remotemap = remote.branchmap()
204 204 newbranches = branches - set(remotemap)
205 205 branches.difference_update(newbranches)
206 206
207 207     # B. register remote heads
208 208 remotebranches = set()
209 209 for branch, heads in remote.branchmap().iteritems():
210 210 remotebranches.add(branch)
211 211 known = []
212 212 unsynced = []
213 213 knownnode = cl.hasnode # do not use nodemap until it is filtered
214 214 for h in heads:
215 215 if knownnode(h):
216 216 known.append(h)
217 217 else:
218 218 unsynced.append(h)
219 219 headssum[branch] = (known, list(known), unsynced)
220 220     # C. add new branch data
221 221 missingctx = list(repo[n] for n in outgoing.missing)
222 222 touchedbranches = set()
223 223 for ctx in missingctx:
224 224 branch = ctx.branch()
225 225 touchedbranches.add(branch)
226 226 if branch not in headssum:
227 227 headssum[branch] = (None, [], [])
228 228
229 229     # D. drop data about untouched branches:
230 230 for branch in remotebranches - touchedbranches:
231 231 del headssum[branch]
232 232
233 233     # E. Update newmap with outgoing changes.
234 234 # This will possibly add new heads and remove existing ones.
235 235 newmap = branchmap.branchcache((branch, heads[1])
236 236 for branch, heads in headssum.iteritems()
237 237 if heads[0] is not None)
238 238 newmap.update(repo, (ctx.rev() for ctx in missingctx))
239 239 for branch, newheads in newmap.iteritems():
240 240 headssum[branch][1][:] = newheads
241 241 for branch, items in headssum.iteritems():
242 242 for l in items:
243 243 if l is not None:
244 244 l.sort()
245     # If there is no obsstore, no post processing is needed.
246 if repo.obsstore:
247 allmissing = set(outgoing.missing)
248 cctx = repo.set('%ld', outgoing.common)
249 allfuturecommon = set(c.node() for c in cctx)
250 allfuturecommon.update(allmissing)
251 for branch, heads in sorted(headssum.iteritems()):
252 remoteheads, newheads, unsyncedheads = heads
253 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
254 newheads = sorted(result[0])
255 headssum[branch] = (remoteheads, newheads, unsyncedheads)
245 256 return headssum
246 257
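The summary is a per-branch triple, and step E updates the newheads list in place through slice assignment, which is why the enclosing tuple never needs rebuilding (except in the obsolescence pass this commit adds). A small illustration with made-up hashes:

    # branch -> (remoteheads, newheads, unsyncedheads);
    # remoteheads is None when the branch is new on the remote.
    headssum = {
        'default': (['abc1'], ['abc1'], []),  # existing remote branch
        'feature': (None, [], []),            # branch created by this push
    }
    # In-place update of newheads, keeping the tuple itself intact:
    headssum['default'][1][:] = ['abc1', 'def2']
    assert headssum['default'] == (['abc1'], ['abc1', 'def2'], [])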
247 258 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
248 259 """Compute branchmapsummary for repo without branchmap support"""
249 260
250 261 # 1-4b. old servers: Check for new topological heads.
251 262 # Construct {old,new}map with branch = None (topological branch).
252 263 # (code based on update)
253 264 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
254 265 oldheads = sorted(h for h in remoteheads if knownnode(h))
255 266 # all nodes in outgoing.missing are children of either:
256 267 # - an element of oldheads
257 268 # - another element of outgoing.missing
258 269 # - nullrev
259 270     # This explains why the new heads are very simple to compute.
260 271 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
261 272 newheads = sorted(c.node() for c in r)
262 273 # set some unsynced head to issue the "unsynced changes" warning
263 274 if inc:
264 275 unsynced = [None]
265 276 else:
266 277 unsynced = []
267 278 return {None: (oldheads, newheads, unsynced)}
268 279
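With no branchmap support, heads are purely topological: among oldheads plus outgoing.missing, the new heads are exactly the nodes that no other node in that set claims as a parent. A toy computation over an invented parent map:

    # Toy DAG as node -> parents; heads have no children in the set.
    parents = {
        'a': [],       # old remote head
        'b': ['a'],    # outgoing child of a
        'c': ['b'],    # outgoing child of b
        'd': ['a'],    # outgoing sibling head
    }
    nonheads = {p for ps in parents.values() for p in ps}
    newheads = sorted(set(parents) - nonheads)
    assert newheads == ['c', 'd']  # 'a' and 'b' are no longer heads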
269 280 def _nowarnheads(pushop):
270 281 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
271 282 repo = pushop.repo.unfiltered()
272 283 remote = pushop.remote
273 284 localbookmarks = repo._bookmarks
274 285 remotebookmarks = remote.listkeys('bookmarks')
275 286 bookmarkedheads = set()
276 287
277 288 # internal config: bookmarks.pushing
278 289 newbookmarks = [localbookmarks.expandname(b)
279 290 for b in pushop.ui.configlist('bookmarks', 'pushing')]
280 291
281 292 for bm in localbookmarks:
282 293 rnode = remotebookmarks.get(bm)
283 294 if rnode and rnode in repo:
284 295 lctx, rctx = repo[bm], repo[rnode]
285 296 if bookmarks.validdest(repo, rctx, lctx):
286 297 bookmarkedheads.add(lctx.node())
287 298 else:
288 299 if bm in newbookmarks and bm not in remotebookmarks:
289 300 bookmarkedheads.add(repo[bm].node())
290 301
291 302 return bookmarkedheads
292 303
293 304 def checkheads(pushop):
294 305 """Check that a push won't add any outgoing head
295 306
296 307     raise an Abort error and display a ui message as needed.
297 308 """
298 309
299 310 repo = pushop.repo.unfiltered()
300 311 remote = pushop.remote
301 312 outgoing = pushop.outgoing
302 313 remoteheads = pushop.remoteheads
303 314 newbranch = pushop.newbranch
304 315 inc = bool(pushop.incoming)
305 316
306 317 # Check for each named branch if we're creating new remote heads.
307 318 # To be a remote head after push, node must be either:
308 319 # - unknown locally
309 320 # - a local outgoing head descended from update
310 321 # - a remote head that's known locally and not
311 322 # ancestral to an outgoing head
312 323 if remoteheads == [nullid]:
313 324 # remote is empty, nothing to check.
314 325 return
315 326
316 327 if remote.capable('branchmap'):
317 328 headssum = _headssummary(pushop)
318 329 else:
319 330 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
320 331 newbranches = [branch for branch, heads in headssum.iteritems()
321 332 if heads[0] is None]
322 333 # 1. Check for new branches on the remote.
323 334 if newbranches and not newbranch: # new branch requires --new-branch
324 335 branchnames = ', '.join(sorted(newbranches))
325 336 raise error.Abort(_("push creates new remote branches: %s!")
326 337 % branchnames,
327 338 hint=_("use 'hg push --new-branch' to create"
328 339 " new remote branches"))
329 340
330 341 # 2. Find heads that we need not warn about
331 342 nowarnheads = _nowarnheads(pushop)
332 343
333 344 # 3. Check for new heads.
334 345 # If there are more heads after the push than before, a suitable
335 346 # error message, depending on unsynced status, is displayed.
336 347 errormsg = None
337     # If there is no obsstore, no post-processing is needed.
338 if repo.obsstore:
339 allmissing = set(outgoing.missing)
340 cctx = repo.set('%ld', outgoing.common)
341 allfuturecommon = set(c.node() for c in cctx)
342 allfuturecommon.update(allmissing)
343 for branch, heads in sorted(headssum.iteritems()):
344 remoteheads, newheads, unsyncedheads = heads
345 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
346 newheads = sorted(result[0])
347 headssum[branch] = (remoteheads, newheads, unsyncedheads)
348 348 for branch, heads in sorted(headssum.iteritems()):
349 349 remoteheads, newheads, unsyncedheads = heads
350 350 # add unsynced data
351 351 if remoteheads is None:
352 352 oldhs = set()
353 353 else:
354 354 oldhs = set(remoteheads)
355 355 oldhs.update(unsyncedheads)
356 356 dhs = None # delta heads, the new heads on branch
357 357 newhs = set(newheads)
358 358 newhs.update(unsyncedheads)
359 359 if unsyncedheads:
360 360 if None in unsyncedheads:
361 361 # old remote, no heads data
362 362 heads = None
363 363 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
364 364 heads = ' '.join(short(h) for h in unsyncedheads)
365 365 else:
366 366 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
367 367 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
368 368 if heads is None:
369 369 repo.ui.status(_("remote has heads that are "
370 370 "not known locally\n"))
371 371 elif branch is None:
372 372 repo.ui.status(_("remote has heads that are "
373 373 "not known locally: %s\n") % heads)
374 374 else:
375 375 repo.ui.status(_("remote has heads on branch '%s' that are "
376 376 "not known locally: %s\n") % (branch, heads))
377 377 if remoteheads is None:
378 378 if len(newhs) > 1:
379 379 dhs = list(newhs)
380 380 if errormsg is None:
381 381 errormsg = (_("push creates new branch '%s' "
382 382 "with multiple heads") % (branch))
383 383 hint = _("merge or"
384 384 " see 'hg help push' for details about"
385 385 " pushing new heads")
386 386 elif len(newhs) > len(oldhs):
387 387 # remove bookmarked or existing remote heads from the new heads list
388 388 dhs = sorted(newhs - nowarnheads - oldhs)
389 389 if dhs:
390 390 if errormsg is None:
391 391 if branch not in ('default', None):
392 392 errormsg = _("push creates new remote head %s "
393 393 "on branch '%s'!") % (short(dhs[0]), branch)
394 394 elif repo[dhs[0]].bookmarks():
395 395 errormsg = _("push creates new remote head %s "
396 396 "with bookmark '%s'!") % (
397 397 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
398 398 else:
399 399 errormsg = _("push creates new remote head %s!"
400 400 ) % short(dhs[0])
401 401 if unsyncedheads:
402 402 hint = _("pull and merge or"
403 403 " see 'hg help push' for details about"
404 404 " pushing new heads")
405 405 else:
406 406 hint = _("merge or"
407 407 " see 'hg help push' for details about"
408 408 " pushing new heads")
409 409 if branch is None:
410 410 repo.ui.note(_("new remote heads:\n"))
411 411 else:
412 412 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
413 413 for h in dhs:
414 414 repo.ui.note((" %s\n") % short(h))
415 415 if errormsg:
416 416 raise error.Abort(errormsg, hint=hint)
417 417
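The per-branch check above reduces to set arithmetic: the push is refused when a branch ends up with strictly more heads and at least one extra head is neither already remote nor exempted (e.g. bookmarked). A toy version of that test with invented sets:

    # Toy version of the new-head test in checkheads.
    oldhs = {'h1'}               # remote heads plus unsynced heads
    newhs = {'h1', 'h2', 'h3'}   # heads after the push
    nowarnheads = {'h3'}         # e.g. a head pushed with its bookmark
    if len(newhs) > len(oldhs):
        dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            print('push creates new remote head %s!' % dhs[0])  # h2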
418 418 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
419 419 """post process the list of new heads with obsolescence information
420 420
421 421 Exists as a sub-function to contain the complexity and allow extensions to
422 422 experiment with smarter logic.
423 423
424 424 Returns (newheads, discarded_heads) tuple
425 425 """
426 426 # known issue
427 427 #
428 428     # * We "silently" skip processing on all changesets unknown locally
429 429 #
430 430     # * if <nh> is public on the remote, it won't be affected by
431 431     #   obsolescence markers and a new head is created
432 432
433 433 # define various utilities and containers
434 434 repo = pushop.repo
435 435 unfi = repo.unfiltered()
436 436 tonode = unfi.changelog.node
437 437 torev = unfi.changelog.rev
438 438 public = phases.public
439 439 getphase = unfi._phasecache.phase
440 440 ispublic = (lambda r: getphase(unfi, r) == public)
441 441 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
442 442 futurecommon)
443 443 successorsmarkers = unfi.obsstore.successors
444 444 newhs = set() # final set of new heads
445 445 discarded = set() # new head of fully replaced branch
446 446
447 447 localcandidate = set() # candidate heads known locally
448 448 unknownheads = set() # candidate heads unknown locally
449 449 for h in candidate_newhs:
450 450 if h in unfi:
451 451 localcandidate.add(h)
452 452 else:
453 453 if successorsmarkers.get(h) is not None:
454 454 msg = ('checkheads: remote head unknown locally has'
455 455 ' local marker: %s\n')
456 456 repo.ui.debug(msg % hex(h))
457 457 unknownheads.add(h)
458 458
459 459 # fast path the simple case
460 460 if len(localcandidate) == 1:
461 461 return unknownheads | set(candidate_newhs), set()
462 462
463 463 # actually process branch replacement
464 464 while localcandidate:
465 465 nh = localcandidate.pop()
466 466 # run this check early to skip the evaluation of the whole branch
467 467 if (nh in futurecommon or ispublic(torev(nh))):
468 468 newhs.add(nh)
469 469 continue
470 470
471 471 # Get all revs/nodes on the branch exclusive to this head
472 472         # (already filtered heads are "ignored")
473 473 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
474 474 nh, localcandidate, newhs)
475 475 branchnodes = [tonode(r) for r in branchrevs]
476 476
477 477 # The branch won't be hidden on the remote if
478 478 # * any part of it is public,
479 479 # * any part of it is considered part of the result by previous logic,
480 480         # * we have no markers to push to obsolete it.
481 481 if (any(ispublic(r) for r in branchrevs)
482 482 or any(n in futurecommon for n in branchnodes)
483 483 or any(not hasoutmarker(n) for n in branchnodes)):
484 484 newhs.add(nh)
485 485 else:
486 486 # note: there is a corner case if there is a merge in the branch.
487 487 # we might end up with -more- heads. However, these heads are not
488 488             # "added" by the push, but more by the "removal" on the
489 489             # remote, so I think it is okay to ignore them.
490 490 discarded.add(nh)
491 491 newhs |= unknownheads
492 492 return newhs, discarded
493 493
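Stripped of revsets, the per-head rule above is: keep a candidate head unless its exclusive branch is entirely non-public, entirely outside the future common set, and covered node-for-node by outgoing obsolescence markers. A toy evaluation of that predicate (all data invented):

    # Keep/discard decision for one candidate head nh, as plain data.
    branchnodes = ['n1', 'n2']               # nodes exclusive to nh
    ispublic = {'n1': False, 'n2': False}    # nothing is public
    futurecommon = set()                     # nothing is/becomes common
    hasoutmarker = {'n1': True, 'n2': True}  # every node gets obsoleted

    keep = (any(ispublic[n] for n in branchnodes)
            or any(n in futurecommon for n in branchnodes)
            or any(not hasoutmarker[n] for n in branchnodes))
    assert not keep  # fully replaced branch: nh lands in 'discarded'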
494 494 def pushingmarkerfor(obsstore, pushset, node):
495 495 """true if some markers are to be pushed for node
496 496
497 497     We cannot just look into the pushed obsmarkers from the pushop because
498 498 discovery might have filtered relevant markers. In addition listing all
499 499 markers relevant to all changesets in the pushed set would be too expensive
500 500 (O(len(repo)))
501 501
502 502     (note: there are caching opportunities in this function, but it would
503 503     require a two-dimensional stack.)
504 504 """
505 505 successorsmarkers = obsstore.successors
506 506 stack = [node]
507 507 seen = set(stack)
508 508 while stack:
509 509 current = stack.pop()
510 510 if current in pushset:
511 511 return True
512 512 markers = successorsmarkers.get(current, ())
513 513 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
514 514 for m in markers:
515 515 nexts = m[1] # successors
516 516 if not nexts: # this is a prune marker
517 517 nexts = m[5] or () # parents
518 518 for n in nexts:
519 519 if n not in seen:
520 520 seen.add(n)
521 521 stack.append(n)
522 522 return False
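The walk above chases successors transitively and, for prune markers (empty successor list), falls back to the recorded parents. A self-contained rerun of the same loop over a toy marker store; the tuples mimic the ('prec', 'succs', 'flag', 'meta', 'date', 'parents') layout noted in the code:

    # node -> markers, each (prec, succs, flag, meta, date, parents).
    toymarkers = {
        'a': [('a', ('b',), 0, (), 0.0, None)],  # a rewritten into b
        'b': [('b', (), 0, (), 0.0, ('c',))],    # b pruned; parent is c
    }

    def toy_pushingmarkerfor(successors, pushset, node):
        stack, seen = [node], {node}
        while stack:
            current = stack.pop()
            if current in pushset:
                return True
            for m in successors.get(current, ()):
                nexts = m[1] or (m[5] or ())  # successors, else parents
                for n in nexts:
                    if n not in seen:
                        seen.add(n)
                        stack.append(n)
        return False

    assert toy_pushingmarkerfor(toymarkers, {'c'}, 'a')  # a -> b -> c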