##// END OF EJS Templates
checkheads: drop now unused filtering of 'unsyncedheads'...
marmoute -
r32674:7a7c4f3a default
parent child Browse files
Show More
@@ -1,522 +1,521 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """
    # Old servers without getbundle use the tree-walking protocol instead.
    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        # no nodemap until it is filtered
        knownnode = repo.changelog.hasnode
        # If every requested head already exists locally there is nothing
        # to discover: the requested heads are both the common set and the
        # remote heads.
        if all(knownnode(h) for h in heads):
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui, repo, remote, abortwhenunrelated=not force)
    return (list(common), anyinc, heads or list(srvheads))
65 65
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changeset that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, repo, commonheads=None, missingheads=None,
                 missingroots=None):
        # commonheads and missingroots are mutually exclusive ways of
        # describing the boundary; at least one of them must be unset.
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            discbases = []
            for n in missingroots:
                discbases.extend(p for p in cl.parents(n) if p != nullid)
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            included = set(csets)
            missingheads = heads
            # parents of the roots that are not themselves missing form the
            # common boundary
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = cl
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # Populate the lazily-computed _common/_missing pair in one call.
        self._common, self._missing = self._revlog.findcommonmissing(
            self.commonheads, self.missingheads)

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
124 124
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        # nothing secret or obsolete can exist: all local heads are pushable
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        og._common, allmissing = repo.changelog.findcommonmissing(
            og.commonheads, onlyheads)
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else:
            # some changesets were excluded; recompute the heads of missing
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og
184 184
def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # A. register remote heads
    remotebranches = set()
    # do not use nodemap until it is filtered
    knownnode = cl.hasnode
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = [h for h in heads if knownnode(h)]
        unsynced = [h for h in heads if not knownnode(h)]
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C drop data about untouched branches:
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.branchcache((branch, heads[1])
                                   for branch, heads in headssum.iteritems()
                                   if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    # keep all head lists deterministically ordered
    for branch, items in headssum.iteritems():
        for l in items:
            if l is not None:
                l.sort()
    return headssum
243 243
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new head are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    unsynced = [None] if inc else []
    return {None: (oldheads, newheads, unsynced)}
265 265
def _nowarnheads(pushop):
    """Return the set of local head nodes we need not warn about.

    Covers heads carrying a bookmark that is being updated remotely, and
    heads carrying a bookmark explicitly listed for pushing.
    """
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            # bookmark exists on both sides; no warning if this push is a
            # valid fast-forward of the remote bookmark
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        elif bm in newbookmarks and bm not in remotebookmarks:
            # bookmark is being created remotely by this push
            bookmarkedheads.add(repo[bm].node())

    return bookmarkedheads
289 289
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
    # NOTE(review): the scraped source contained both the pre- and post-commit
    # versions of the unsynced-heads reporting block interleaved (diff
    # artifacts); this body is the post-commit state, which reports
    # 'unsyncedheads' directly instead of re-filtering it against
    # 'discardedheads' (that filtering had become dead work).

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise error.Abort(_("push creates new remote branches: %s!")
                          % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    # defensive default: hint is only assigned alongside errormsg below, but
    # initialize it so the final Abort can never hit an unbound name
    hint = None
    # If there is no obsstore, allfuturecommon won't be used, so no
    # need to compute it.
    if repo.obsstore:
        allmissing = set(outgoing.missing)
        cctx = repo.set('%ld', outgoing.common)
        allfuturecommon = set(c.node() for c in cctx)
        allfuturecommon.update(allmissing)
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        if not repo.obsstore:
            discardedheads = set()
            newhs = set(newheads)
        else:
            newhs, discardedheads = _postprocessobsolete(pushop,
                                                         allfuturecommon,
                                                         newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            elif len(unsyncedheads) <= 4 or repo.ui.verbose:
                heads = ' '.join(short(h) for h in unsyncedheads)
            else:
                # keep the message short: show four heads and a count
                heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
                         ' ' + _("and %s others") % (len(unsyncedheads) - 4))
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            # remote branch is new: only multiple heads are a problem
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
417 416
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    torev = unfi.changelog.rev
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = (lambda r: getphase(unfi, r) == public)
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
                                     futurecommon)
    successorsmarkers = unfi.obsstore.successors
    newhs = set()  # final set of new heads
    discarded = set()  # new head of fully replaced branch

    localcandidate = set()  # candidate heads known locally
    unknownheads = set()  # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                msg = ('checkheads: remote head unknown locally has'
                       ' local marker: %s\n')
                repo.ui.debug(msg % hex(h))
            # unknown heads are always kept, marker or not
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        # run this check early to skip the evaluation of the whole branch
        if nh in futurecommon or ispublic(torev(nh)):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                               nh, localcandidate, newhs)
        branchnodes = [tonode(r) for r in branchrevs]

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (any(ispublic(r) for r in branchrevs)
                or any(n in futurecommon for n in branchnodes)
                or any(not hasoutmarker(n) for n in branchnodes)):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads. However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
493 492
def pushingmarkerfor(obsstore, pushset, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    successorsmarkers = obsstore.successors
    # iterative DFS through the successor (or prune-parent) graph
    stack = [node]
    seen = set(stack)
    while stack:
        current = stack.pop()
        if current in pushset:
            return True
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in successorsmarkers.get(current, ()):
            # follow successors; a prune marker has none, so fall back to
            # its recorded parents
            nexts = marker[1] or marker[5] or ()
            for n in nexts:
                if n not in seen:
                    seen.add(n)
                    stack.append(n)
    return False
General Comments 0
You need to be logged in to leave comments. Login now