checkheads: clarify that we no longer touch the head unknown locally...
marmoute
r32673:bd966b9f default
@@ -1,523 +1,522 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
29 29 def findcommonincoming(repo, remote, heads=None, force=False):
30 30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 31 subset of nodes between repo and remote.
32 32
33 33 "common" is a list of (at least) the heads of the common subset.
34 34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 35 locally. If remote does not support getbundle, this actually is a list of
36 36 roots of the nodes that would be incoming, to be supplied to
37 37 changegroupsubset. No code except for pull should be relying on this fact
38 38 any longer.
39 39 "heads" is either the supplied heads, or else the remote's heads.
40 40
41 41 If you pass heads and they are all known locally, the response lists just
42 42 these heads in "common" and in "heads".
43 43
44 44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 45 extensions a good hook into outgoing.
46 46 """
47 47
48 48 if not remote.capable('getbundle'):
49 49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50 50
51 51 if heads:
52 52 allknown = True
53 53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 54 for h in heads:
55 55 if not knownnode(h):
56 56 allknown = False
57 57 break
58 58 if allknown:
59 59 return (heads, False, heads)
60 60
61 61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 62 abortwhenunrelated=not force)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
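A minimal usage sketch of the tuple contract documented above. This is illustrative only: it assumes a local `repo` and a `remote` peer are already in scope, and the status messages are invented.

    # hypothetical caller unpacking (common, anyincoming, heads)
    common, anyinc, srvheads = findcommonincoming(repo, remote)
    if not anyinc:
        repo.ui.status('no changes found\n')
    else:
        # "common" holds at least the heads of the shared subset;
        # "srvheads" is the remote's heads since no heads= was passed
        repo.ui.debug('%d common head(s)\n' % len(common))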
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74 excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
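The constructor invariant (the assert above requires at least one of commonheads/missingroots to be None) is easiest to see in a sketch; the node values below are placeholders, not real hashes.

    # hypothetical: the two valid ways to build an outgoing instance
    og1 = outgoing(repo)                            # everything missing
    og2 = outgoing(repo, missingroots=[somenode])   # bounded from roots
    # outgoing(repo, commonheads=[n1], missingroots=[n2]) would trip
    # the assert, since neither argument is None
    nodes = og1.missing   # common/missing are computed lazily on access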
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force)
145 145 og.commonheads, _any, _hds = commoninc
146 146
147 147 # compute outgoing
148 148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 149 if not mayexclude:
150 150 og.missingheads = onlyheads or repo.heads()
151 151 elif onlyheads is None:
152 152 # use visible heads as it should be cached
153 153 og.missingheads = repo.filtered("served").heads()
154 154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 155 else:
156 156 # compute common, missing and exclude secret stuff
157 157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 158 og._common, allmissing = sets
159 159 og._missing = missing = []
160 160 og.excluded = excluded = []
161 161 for node in allmissing:
162 162 ctx = repo[node]
163 163 if ctx.phase() >= phases.secret or ctx.extinct():
164 164 excluded.append(node)
165 165 else:
166 166 missing.append(node)
167 167 if len(missing) == len(allmissing):
168 168 missingheads = onlyheads
169 169 else: # update missing heads
170 170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 171 og.missingheads = missingheads
172 172 if portable:
173 173 # recompute common and missingheads as if -r<rev> had been given for
174 174 # each head of missing, and --base <rev> for each head of the proper
175 175 # ancestors of missing
176 176 og._computecommonmissing()
177 177 cl = repo.changelog
178 178 missingrevs = set(cl.rev(n) for n in og._missing)
179 179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 180 commonheads = set(og.commonheads)
181 181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 182
183 183 return og
184 184
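As the docstring says, a caller that has already run incoming discovery should hand the result in via commoninc rather than paying for a second wire round-trip. A hedged sketch, reusing the module's own helpers:

    # hypothetical: reuse a prior discovery result for outgoing
    commoninc = findcommonincoming(repo, other, force=False)
    og = findcommonoutgoing(repo, other, commoninc=commoninc)
    for node in og.missing:
        repo.ui.debug('would push %s\n' % short(node))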
185 185 def _headssummary(repo, remote, outgoing):
186 186 """compute a summary of branch and heads status before and after push
187 187
188 188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
189 189
190 190 - branch: the branch name
191 191 - remoteheads: the list of remote heads known locally
192 192 None if the branch is new
193 193 - newheads: the new remote heads (known locally) with outgoing pushed
194 194 - unsyncedheads: the list of remote heads unknown locally.
195 195 """
196 196 cl = repo.changelog
197 197 headssum = {}
198 198 # A. Create set of branches involved in the push.
199 199 branches = set(repo[n].branch() for n in outgoing.missing)
200 200 remotemap = remote.branchmap()
201 201 newbranches = branches - set(remotemap)
202 202 branches.difference_update(newbranches)
203 203
204 204 # B. register remote heads
205 205 remotebranches = set()
206 206 for branch, heads in remote.branchmap().iteritems():
207 207 remotebranches.add(branch)
208 208 known = []
209 209 unsynced = []
210 210 knownnode = cl.hasnode # do not use nodemap until it is filtered
211 211 for h in heads:
212 212 if knownnode(h):
213 213 known.append(h)
214 214 else:
215 215 unsynced.append(h)
216 216 headssum[branch] = (known, list(known), unsynced)
217 217 # C. add new branch data
218 218 missingctx = list(repo[n] for n in outgoing.missing)
219 219 touchedbranches = set()
220 220 for ctx in missingctx:
221 221 branch = ctx.branch()
222 222 touchedbranches.add(branch)
223 223 if branch not in headssum:
224 224 headssum[branch] = (None, [], [])
225 225
226 226 # D. drop data about untouched branches:
227 227 for branch in remotebranches - touchedbranches:
228 228 del headssum[branch]
229 229
230 230 # E. Update newmap with outgoing changes.
231 231 # This will possibly add new heads and remove existing ones.
232 232 newmap = branchmap.branchcache((branch, heads[1])
233 233 for branch, heads in headssum.iteritems()
234 234 if heads[0] is not None)
235 235 newmap.update(repo, (ctx.rev() for ctx in missingctx))
236 236 for branch, newheads in newmap.iteritems():
237 237 headssum[branch][1][:] = newheads
238 238 for branch, items in headssum.iteritems():
239 239 for l in items:
240 240 if l is not None:
241 241 l.sort()
242 242 return headssum
243 243
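The shape of the returned mapping is easier to read from a concrete value than from the prose above; the node names below are invented for illustration.

    # illustrative headssum for a push that adds a head on 'default'
    # and creates a branch the remote has never seen
    headssum = {
        'default': ([oldhead],           # remoteheads: known locally
                    [oldhead, newhead],  # newheads: after the push lands
                    []),                 # unsyncedheads: unknown locally
        'feature': (None, [], []),       # None flags a brand-new branch
    }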
244 244 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
245 245 """Compute branchmapsummary for repo without branchmap support"""
246 246
247 247 # 1-4b. old servers: Check for new topological heads.
248 248 # Construct {old,new}map with branch = None (topological branch).
249 249 # (code based on update)
250 250 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
251 251 oldheads = sorted(h for h in remoteheads if knownnode(h))
252 252 # all nodes in outgoing.missing are children of either:
253 253 # - an element of oldheads
254 254 # - another element of outgoing.missing
255 255 # - nullrev
256 256 # This explains why the new heads are very simple to compute.
257 257 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
258 258 newheads = sorted(c.node() for c in r)
259 259 # set some unsynced head to issue the "unsynced changes" warning
260 260 if inc:
261 261 unsynced = [None]
262 262 else:
263 263 unsynced = []
264 264 return {None: (oldheads, newheads, unsynced)}
265 265
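Note the [None] sentinel: a pre-branchmap server gives us no usable per-branch head data, so a lone None stands in for "some unsynced head exists". checkheads tests for exactly that; a hedged sketch of the consumer side, mirroring the branch further down:

    # hypothetical consumer of the sentinel (cf. checkheads below)
    headssum = _oldheadssummary(repo, remoteheads, outgoing, inc=True)
    oldheads, newheads, unsynced = headssum[None]
    if None in unsynced:
        # old remote, no heads data: fall back to the vague warning
        repo.ui.status('remote has heads that are not known locally\n')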
266 266 def _nowarnheads(pushop):
267 267 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
268 268 repo = pushop.repo.unfiltered()
269 269 remote = pushop.remote
270 270 localbookmarks = repo._bookmarks
271 271 remotebookmarks = remote.listkeys('bookmarks')
272 272 bookmarkedheads = set()
273 273
274 274 # internal config: bookmarks.pushing
275 275 newbookmarks = [localbookmarks.expandname(b)
276 276 for b in pushop.ui.configlist('bookmarks', 'pushing')]
277 277
278 278 for bm in localbookmarks:
279 279 rnode = remotebookmarks.get(bm)
280 280 if rnode and rnode in repo:
281 281 lctx, rctx = repo[bm], repo[rnode]
282 282 if bookmarks.validdest(repo, rctx, lctx):
283 283 bookmarkedheads.add(lctx.node())
284 284 else:
285 285 if bm in newbookmarks and bm not in remotebookmarks:
286 286 bookmarkedheads.add(repo[bm].node())
287 287
288 288 return bookmarkedheads
289 289
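The set returned here is subtracted from the candidate new heads in checkheads, so a freshly pushed bookmark does not trigger the new-head abort. A condensed sketch of how the two pieces fit; newhs and oldhs stand for the per-branch head sets computed in checkheads below.

    # hypothetical: exempt bookmarked heads from the new-head check
    nowarnheads = _nowarnheads(pushop)
    dhs = sorted(newhs - nowarnheads - oldhs)   # mirrors checkheads
    if not dhs:
        pass  # every extra head is bookmarked, so no abort is raised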
290 290 def checkheads(pushop):
291 291 """Check that a push won't add any outgoing head
292 292
293 293 Raise an Abort error and display a ui message as needed.
294 294 """
295 295
296 296 repo = pushop.repo.unfiltered()
297 297 remote = pushop.remote
298 298 outgoing = pushop.outgoing
299 299 remoteheads = pushop.remoteheads
300 300 newbranch = pushop.newbranch
301 301 inc = bool(pushop.incoming)
302 302
303 303 # Check for each named branch if we're creating new remote heads.
304 304 # To be a remote head after push, a node must be either:
305 305 # - unknown locally
306 306 # - a local outgoing head descended from update
307 307 # - a remote head that's known locally and not
308 308 # ancestral to an outgoing head
309 309 if remoteheads == [nullid]:
310 310 # remote is empty, nothing to check.
311 311 return
312 312
313 313 if remote.capable('branchmap'):
314 314 headssum = _headssummary(repo, remote, outgoing)
315 315 else:
316 316 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
317 317 newbranches = [branch for branch, heads in headssum.iteritems()
318 318 if heads[0] is None]
319 319 # 1. Check for new branches on the remote.
320 320 if newbranches and not newbranch: # new branch requires --new-branch
321 321 branchnames = ', '.join(sorted(newbranches))
322 322 raise error.Abort(_("push creates new remote branches: %s!")
323 323 % branchnames,
324 324 hint=_("use 'hg push --new-branch' to create"
325 325 " new remote branches"))
326 326
327 327 # 2. Find heads that we need not warn about
328 328 nowarnheads = _nowarnheads(pushop)
329 329
330 330 # 3. Check for new heads.
331 331 # If there are more heads after the push than before, a suitable
332 332 # error message, depending on unsynced status, is displayed.
333 333 errormsg = None
334 334 # If there is no obsstore, allfuturecommon won't be used, so no
335 335 # need to compute it.
336 336 if repo.obsstore:
337 337 allmissing = set(outgoing.missing)
338 338 cctx = repo.set('%ld', outgoing.common)
339 339 allfuturecommon = set(c.node() for c in cctx)
340 340 allfuturecommon.update(allmissing)
341 341 for branch, heads in sorted(headssum.iteritems()):
342 342 remoteheads, newheads, unsyncedheads = heads
343 candidate_newhs = set(newheads)
344 343 # add unsynced data
345 344 if remoteheads is None:
346 345 oldhs = set()
347 346 else:
348 347 oldhs = set(remoteheads)
349 348 oldhs.update(unsyncedheads)
350 candidate_newhs.update(unsyncedheads)
351 349 dhs = None # delta heads, the new heads on branch
352 350 if not repo.obsstore:
353 351 discardedheads = set()
354 newhs = candidate_newhs
352 newhs = set(newheads)
355 353 else:
356 354 newhs, discardedheads = _postprocessobsolete(pushop,
357 355 allfuturecommon,
358 candidate_newhs)
356 newheads)
357 newhs.update(unsyncedheads)
359 358 unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
360 359 if unsynced:
361 360 if None in unsynced:
362 361 # old remote, no heads data
363 362 heads = None
364 363 elif len(unsynced) <= 4 or repo.ui.verbose:
365 364 heads = ' '.join(short(h) for h in unsynced)
366 365 else:
367 366 heads = (' '.join(short(h) for h in unsynced[:4]) +
368 367 ' ' + _("and %s others") % (len(unsynced) - 4))
369 368 if heads is None:
370 369 repo.ui.status(_("remote has heads that are "
371 370 "not known locally\n"))
372 371 elif branch is None:
373 372 repo.ui.status(_("remote has heads that are "
374 373 "not known locally: %s\n") % heads)
375 374 else:
376 375 repo.ui.status(_("remote has heads on branch '%s' that are "
377 376 "not known locally: %s\n") % (branch, heads))
378 377 if remoteheads is None:
379 378 if len(newhs) > 1:
380 379 dhs = list(newhs)
381 380 if errormsg is None:
382 381 errormsg = (_("push creates new branch '%s' "
383 382 "with multiple heads") % (branch))
384 383 hint = _("merge or"
385 384 " see 'hg help push' for details about"
386 385 " pushing new heads")
387 386 elif len(newhs) > len(oldhs):
388 387 # remove bookmarked or existing remote heads from the new heads list
389 388 dhs = sorted(newhs - nowarnheads - oldhs)
390 389 if dhs:
391 390 if errormsg is None:
392 391 if branch not in ('default', None):
393 392 errormsg = _("push creates new remote head %s "
394 393 "on branch '%s'!") % (short(dhs[0]), branch)
395 394 elif repo[dhs[0]].bookmarks():
396 395 errormsg = _("push creates new remote head %s "
397 396 "with bookmark '%s'!") % (
398 397 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
399 398 else:
400 399 errormsg = _("push creates new remote head %s!"
401 400 ) % short(dhs[0])
402 401 if unsyncedheads:
403 402 hint = _("pull and merge or"
404 403 " see 'hg help push' for details about"
405 404 " pushing new heads")
406 405 else:
407 406 hint = _("merge or"
408 407 " see 'hg help push' for details about"
409 408 " pushing new heads")
410 409 if branch is None:
411 410 repo.ui.note(_("new remote heads:\n"))
412 411 else:
413 412 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
414 413 for h in dhs:
415 414 repo.ui.note((" %s\n") % short(h))
416 415 if errormsg:
417 416 raise error.Abort(errormsg, hint=hint)
418 417
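Condensing the function above: summarize heads per branch, excuse bookmarked heads, and abort when the head count grows. A simplified, hypothetical restatement of the per-branch decision (the real code also handles obsolescence markers and unsynced-head reporting):

    # skeleton only; not the real control flow
    for branch, (remoteheads, newheads, unsyncedheads) in \
            sorted(headssum.iteritems()):
        oldhs = set(remoteheads or []) | set(unsyncedheads)
        newhs = set(newheads) | set(unsyncedheads)
        dhs = sorted(newhs - nowarnheads - oldhs)
        if len(newhs) > len(oldhs) and dhs:
            raise error.Abort(_('push creates new remote head %s!')
                              % short(dhs[0]))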
419 418 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
420 419 """post process the list of new heads with obsolescence information
421 420
422 421 Exists as a sub-function to contain the complexity and allow extensions to
423 422 experiment with smarter logic.
424 423
425 424 Returns (newheads, discarded_heads) tuple
426 425 """
427 426 # known issues
428 427 #
429 428 # * We "silently" skip processing on all changesets unknown locally
430 429 #
431 430 # * if <nh> is public on the remote, it won't be affected by obsolescence
432 431 # markers and a new head is created
433 432
434 433 # define various utilities and containers
435 434 repo = pushop.repo
436 435 unfi = repo.unfiltered()
437 436 tonode = unfi.changelog.node
438 437 torev = unfi.changelog.rev
439 438 public = phases.public
440 439 getphase = unfi._phasecache.phase
441 440 ispublic = (lambda r: getphase(unfi, r) == public)
442 441 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
443 442 futurecommon)
444 443 successorsmarkers = unfi.obsstore.successors
445 444 newhs = set() # final set of new heads
447 446 discarded = set() # new heads of fully replaced branches
447 446
448 447 localcandidate = set() # candidate heads known locally
449 448 unknownheads = set() # candidate heads unknown locally
450 449 for h in candidate_newhs:
451 450 if h in unfi:
452 451 localcandidate.add(h)
453 452 else:
454 453 if successorsmarkers.get(h) is not None:
455 454 msg = ('checkheads: remote head unknown locally has'
456 455 ' local marker: %s\n')
457 456 repo.ui.debug(msg % hex(h))
458 457 unknownheads.add(h)
459 458
460 459 # fast path the simple case
461 460 if len(localcandidate) == 1:
462 461 return unknownheads | set(candidate_newhs), set()
463 462
464 463 # actually process branch replacement
465 464 while localcandidate:
466 465 nh = localcandidate.pop()
467 466 # run this check early to skip the evaluation of the whole branch
468 467 if (nh in futurecommon or ispublic(torev(nh))):
469 468 newhs.add(nh)
470 469 continue
471 470
472 471 # Get all revs/nodes on the branch exclusive to this head
474 473 # (already filtered heads are "ignored")
474 473 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
475 474 nh, localcandidate, newhs)
476 475 branchnodes = [tonode(r) for r in branchrevs]
477 476
478 477 # The branch won't be hidden on the remote if
479 478 # * any part of it is public,
480 479 # * any part of it is considered part of the result by previous logic,
481 480 # * any part of it lacks an outgoing marker that would obsolete it.
482 481 if (any(ispublic(r) for r in branchrevs)
483 482 or any(n in futurecommon for n in branchnodes)
484 483 or any(not hasoutmarker(n) for n in branchnodes)):
485 484 newhs.add(nh)
486 485 else:
487 486 # note: there is a corner case if there is a merge in the branch.
488 487 # we might end up with -more- heads. However, these heads are not
489 488 # "added" by the push, but more by the "removal" on the remote so I
490 489 # think is a okay to ignore them,
491 490 discarded.add(nh)
492 491 newhs |= unknownheads
493 492 return newhs, discarded
494 493
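The revision's commit message points here: heads unknown locally are collected into unknownheads and returned untouched (never discarded), which is why checkheads can now pass only the locally known newheads in and union unsyncedheads back afterwards. A hedged sketch of that contract; candidates stands for any iterable of candidate head nodes.

    # hypothetical: unknown candidates always survive the post-processing
    newhs, discarded = _postprocessobsolete(pushop, futurecommon, candidates)
    unfi = pushop.repo.unfiltered()
    assert all(h in newhs for h in candidates if h not in unfi)
    assert not any(h in discarded for h in candidates if h not in unfi)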
495 494 def pushingmarkerfor(obsstore, pushset, node):
496 495 """true if some markers are to be pushed for node
497 496
498 497 We cannot just look into the pushed obsmarkers from the pushop because
499 498 discovery might have filtered relevant markers. In addition listing all
500 499 markers relevant to all changesets in the pushed set would be too expensive
501 500 (O(len(repo)))
502 501
503 502 (note: there are caching opportunities in this function, but it would
504 503 require a two-dimensional stack.)
505 504 """
506 505 successorsmarkers = obsstore.successors
507 506 stack = [node]
508 507 seen = set(stack)
509 508 while stack:
510 509 current = stack.pop()
511 510 if current in pushset:
512 511 return True
513 512 markers = successorsmarkers.get(current, ())
514 513 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
515 514 for m in markers:
516 515 nexts = m[1] # successors
517 516 if not nexts: # this is a prune marker
518 517 nexts = m[5] or () # parents
519 518 for n in nexts:
520 519 if n not in seen:
521 520 seen.add(n)
522 521 stack.append(n)
523 522 return False
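The loop above is a plain DAG walk over successor markers, detouring through parents for prune markers (empty successor list). The same shape on a toy marker table, hypothetical nodes, using the field layout noted in the comment above:

    # fields are (prec, succs, flag, meta, date, parents)
    successors = {
        'a': [('a', ('b',), 0, (), None, None)],   # a rewritten into b
        'b': [('b', (), 0, (), None, ('p',))],     # b pruned; parent p
    }
    # starting from 'a' the walk visits a -> b -> p, returning True as
    # soon as one of them is already in the pushed set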