headssummary: ensure all returned lists are sorted...
marmoute
r32672:315d74d0 default
@@ -1,519 +1,523 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
29 29 def findcommonincoming(repo, remote, heads=None, force=False):
30 30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 31 subset of nodes between repo and remote.
32 32
33 33 "common" is a list of (at least) the heads of the common subset.
34 34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 35 locally. If remote does not support getbundle, this actually is a list of
36 36 roots of the nodes that would be incoming, to be supplied to
37 37 changegroupsubset. No code except for pull should be relying on this fact
38 38 any longer.
39 39 "heads" is either the supplied heads, or else the remote's heads.
40 40
41 41 If you pass heads and they are all known locally, the response lists just
42 42 these heads in "common" and in "heads".
43 43
44 44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 45 extensions a good hook into outgoing.
46 46 """
47 47
48 48 if not remote.capable('getbundle'):
49 49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50 50
51 51 if heads:
52 52 allknown = True
53 53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 54 for h in heads:
55 55 if not knownnode(h):
56 56 allknown = False
57 57 break
58 58 if allknown:
59 59 return (heads, False, heads)
60 60
61 61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 62 abortwhenunrelated=not force)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
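
As a hedged illustration of the fast path above (not Mercurial's actual API): when every head the caller asks about is already known locally, discovery can answer without a server round-trip. `local_nodes` and `requested_heads` below are hypothetical stand-ins for repo.changelog.hasnode and the `heads` argument.

def fastpath_common(local_nodes, requested_heads):
    # every requested head already exists locally: nothing is incoming,
    # and the requested heads serve as both "common" and "heads"
    if requested_heads and all(h in local_nodes for h in requested_heads):
        return (requested_heads, False, requested_heads)
    return None  # fall through to real set discovery

print(fastpath_common({'a', 'b', 'c'}, ['a', 'b']))  # (['a', 'b'], False, ['a', 'b'])
print(fastpath_common({'a'}, ['a', 'x']))            # None
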
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74 excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
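
The `common` and `missing` attributes above are computed on demand and cached through util.propertycache. A minimal standalone sketch of the same lazy pattern, assuming Python 3, where functools.cached_property plays the role of util.propertycache:

import functools

class LazyOutgoing:
    def __init__(self, commonheads, missingheads):
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._common = None
        self._missing = None

    def _computecommonmissing(self):
        # stand-in for cl.findcommonmissing(commonheads, missingheads):
        # both sets are computed together in one pass
        self._common = set(self.commonheads)
        self._missing = set(self.missingheads)

    @functools.cached_property
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @functools.cached_property
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing

og = LazyOutgoing(['c1'], ['m1', 'm2'])
assert og.missing == {'m1', 'm2'} and og.common == {'c1'}
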
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force)
145 145 og.commonheads, _any, _hds = commoninc
146 146
147 147 # compute outgoing
148 148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 149 if not mayexclude:
150 150 og.missingheads = onlyheads or repo.heads()
151 151 elif onlyheads is None:
152 152 # use visible heads as it should be cached
153 153 og.missingheads = repo.filtered("served").heads()
154 154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 155 else:
156 156 # compute common, missing and exclude secret stuff
157 157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 158 og._common, allmissing = sets
159 159 og._missing = missing = []
160 160 og.excluded = excluded = []
161 161 for node in allmissing:
162 162 ctx = repo[node]
163 163 if ctx.phase() >= phases.secret or ctx.extinct():
164 164 excluded.append(node)
165 165 else:
166 166 missing.append(node)
167 167 if len(missing) == len(allmissing):
168 168 missingheads = onlyheads
169 169 else: # update missing heads
170 170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 171 og.missingheads = missingheads
172 172 if portable:
173 173 # recompute common and missingheads as if -r<rev> had been given for
174 174 # each head of missing, and --base <rev> for each head of the proper
175 175 # ancestors of missing
176 176 og._computecommonmissing()
177 177 cl = repo.changelog
178 178 missingrevs = set(cl.rev(n) for n in og._missing)
179 179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 180 commonheads = set(og.commonheads)
181 181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 182
183 183 return og
184 184
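
The middle branch above partitions every missing changeset by phase: secret or extinct changesets are held back in `excluded`, everything else stays in `missing`. A toy sketch of that partition, with hypothetical stand-ins (`phase_of` for ctx.phase(), the integer 2 for phases.secret):

SECRET = 2  # mercurial phases: 0=public, 1=draft, 2=secret

def partition_missing(allmissing, phase_of, extinct):
    missing, excluded = [], []
    for node in allmissing:
        if phase_of(node) >= SECRET or node in extinct:
            excluded.append(node)   # never sent to the remote
        else:
            missing.append(node)
    return missing, excluded

m, e = partition_missing(['n1', 'n2', 'n3'],
                         {'n1': 1, 'n2': 2, 'n3': 1}.get,
                         extinct={'n3'})
# m == ['n1'], e == ['n2', 'n3']
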
185 185 def _headssummary(repo, remote, outgoing):
186 186 """compute a summary of branch and heads status before and after push
187 187
188 188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
189 189
190 190 - branch: the branch name
191 191 - remoteheads: the list of remote heads known locally
192 192 None if the branch is new
193 193 - newheads: the new remote heads (known locally) with outgoing pushed
194 194 - unsyncedheads: the list of remote heads unknown locally.
195 195 """
196 196 cl = repo.changelog
197 197 headssum = {}
198 198 # A. Create set of branches involved in the push.
199 199 branches = set(repo[n].branch() for n in outgoing.missing)
200 200 remotemap = remote.branchmap()
201 201 newbranches = branches - set(remotemap)
202 202 branches.difference_update(newbranches)
203 203
204 204 # B. register remote heads
205 205 remotebranches = set()
206 206 for branch, heads in remote.branchmap().iteritems():
207 207 remotebranches.add(branch)
208 208 known = []
209 209 unsynced = []
210 210 knownnode = cl.hasnode # do not use nodemap until it is filtered
211 211 for h in heads:
212 212 if knownnode(h):
213 213 known.append(h)
214 214 else:
215 215 unsynced.append(h)
216 216 headssum[branch] = (known, list(known), unsynced)
217 217 # C. add new branch data
218 218 missingctx = list(repo[n] for n in outgoing.missing)
219 219 touchedbranches = set()
220 220 for ctx in missingctx:
221 221 branch = ctx.branch()
222 222 touchedbranches.add(branch)
223 223 if branch not in headssum:
224 224 headssum[branch] = (None, [], [])
225 225
226 226 # D. drop data about untouched branches:
227 227 for branch in remotebranches - touchedbranches:
228 228 del headssum[branch]
229 229
230 230 # E. Update newmap with outgoing changes.
231 231 # This will possibly add new heads and remove existing ones.
232 232 newmap = branchmap.branchcache((branch, heads[1])
233 233 for branch, heads in headssum.iteritems()
234 234 if heads[0] is not None)
235 235 newmap.update(repo, (ctx.rev() for ctx in missingctx))
236 236 for branch, newheads in newmap.iteritems():
237 237 headssum[branch][1][:] = newheads
238 for branch, items in headssum.iteritems():
239 for l in items:
240 if l is not None:
241 l.sort()
238 242 return headssum
239 243
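
The four lines added at the end of _headssummary are the point of this changeset: every non-None list in the summary is sorted in place, so callers see a deterministic order no matter how the underlying sets were iterated. A toy Python 3 sketch of the same normalization, with hex strings standing in for binary node ids:

headssum = {
    'default': (['ff00', 'aa11'], ['aa11', 'bb22'], []),
    'stable': (None, ['dd44', 'cc33'], ['ee55']),  # new branch: no remote heads
}
for branch, items in headssum.items():
    for l in items:
        if l is not None:
            l.sort()
# headssum['default'] -> (['aa11', 'ff00'], ['aa11', 'bb22'], [])
# headssum['stable']  -> (None, ['cc33', 'dd44'], ['ee55'])
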
240 244 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
241 245 """Compute branchmapsummary for repo without branchmap support"""
242 246
243 247 # 1-4b. old servers: Check for new topological heads.
244 248 # Construct {old,new}map with branch = None (topological branch).
245 249 # (code based on update)
246 250 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
247 oldheads = list(h for h in remoteheads if knownnode(h))
251 oldheads = sorted(h for h in remoteheads if knownnode(h))
248 252 # all nodes in outgoing.missing are children of either:
249 253 # - an element of oldheads
250 254 # - another element of outgoing.missing
251 255 # - nullrev
252 256 # This explains why the new heads are very simple to compute.
253 257 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
254 newheads = list(c.node() for c in r)
258 newheads = sorted(c.node() for c in r)
255 259 # set some unsynced head to issue the "unsynced changes" warning
256 260 if inc:
257 261 unsynced = [None]
258 262 else:
259 263 unsynced = []
260 264 return {None: (oldheads, newheads, unsynced)}
261 265
262 266 def _nowarnheads(pushop):
263 267 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
264 268 repo = pushop.repo.unfiltered()
265 269 remote = pushop.remote
266 270 localbookmarks = repo._bookmarks
267 271 remotebookmarks = remote.listkeys('bookmarks')
268 272 bookmarkedheads = set()
269 273
270 274 # internal config: bookmarks.pushing
271 275 newbookmarks = [localbookmarks.expandname(b)
272 276 for b in pushop.ui.configlist('bookmarks', 'pushing')]
273 277
274 278 for bm in localbookmarks:
275 279 rnode = remotebookmarks.get(bm)
276 280 if rnode and rnode in repo:
277 281 lctx, rctx = repo[bm], repo[rnode]
278 282 if bookmarks.validdest(repo, rctx, lctx):
279 283 bookmarkedheads.add(lctx.node())
280 284 else:
281 285 if bm in newbookmarks and bm not in remotebookmarks:
282 286 bookmarkedheads.add(repo[bm].node())
283 287
284 288 return bookmarkedheads
285 289
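
One of the two exemptions above is easy to model standalone: a head pointed at by a bookmark that is being pushed and does not yet exist remotely should not trigger a new-head warning. A hedged sketch with hypothetical dict inputs (`local_bms` for repo._bookmarks, `remote_bms` for remote.listkeys('bookmarks')); the validdest case for already-known bookmarks is omitted:

def nowarn_heads(local_bms, remote_bms, pushing):
    exempt = set()
    for bm, node in local_bms.items():
        if bm in pushing and bm not in remote_bms:
            exempt.add(node)  # a newly pushed bookmark legitimizes its head
    return exempt

print(nowarn_heads({'feature': 'n9', 'main': 'n1'},
                   {'main': 'n1'}, pushing={'feature'}))  # {'n9'}
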
286 290 def checkheads(pushop):
287 291 """Check that a push won't add any outgoing head
288 292
289 293 raise Abort error and display ui message as needed.
290 294 """
291 295
292 296 repo = pushop.repo.unfiltered()
293 297 remote = pushop.remote
294 298 outgoing = pushop.outgoing
295 299 remoteheads = pushop.remoteheads
296 300 newbranch = pushop.newbranch
297 301 inc = bool(pushop.incoming)
298 302
299 303 # Check for each named branch if we're creating new remote heads.
300 304 # To be a remote head after push, node must be either:
301 305 # - unknown locally
302 306 # - a local outgoing head descended from update
303 307 # - a remote head that's known locally and not
304 308 # ancestral to an outgoing head
305 309 if remoteheads == [nullid]:
306 310 # remote is empty, nothing to check.
307 311 return
308 312
309 313 if remote.capable('branchmap'):
310 314 headssum = _headssummary(repo, remote, outgoing)
311 315 else:
312 316 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
313 317 newbranches = [branch for branch, heads in headssum.iteritems()
314 318 if heads[0] is None]
315 319 # 1. Check for new branches on the remote.
316 320 if newbranches and not newbranch: # new branch requires --new-branch
317 321 branchnames = ', '.join(sorted(newbranches))
318 322 raise error.Abort(_("push creates new remote branches: %s!")
319 323 % branchnames,
320 324 hint=_("use 'hg push --new-branch' to create"
321 325 " new remote branches"))
322 326
323 327 # 2. Find heads that we need not warn about
324 328 nowarnheads = _nowarnheads(pushop)
325 329
326 330 # 3. Check for new heads.
327 331 # If there are more heads after the push than before, a suitable
328 332 # error message, depending on unsynced status, is displayed.
329 333 errormsg = None
330 334 # If there is no obsstore, allfuturecommon won't be used, so no
331 335 # need to compute it.
332 336 if repo.obsstore:
333 337 allmissing = set(outgoing.missing)
334 338 cctx = repo.set('%ld', outgoing.common)
335 339 allfuturecommon = set(c.node() for c in cctx)
336 340 allfuturecommon.update(allmissing)
337 341 for branch, heads in sorted(headssum.iteritems()):
338 342 remoteheads, newheads, unsyncedheads = heads
339 343 candidate_newhs = set(newheads)
340 344 # add unsynced data
341 345 if remoteheads is None:
342 346 oldhs = set()
343 347 else:
344 348 oldhs = set(remoteheads)
345 349 oldhs.update(unsyncedheads)
346 350 candidate_newhs.update(unsyncedheads)
347 351 dhs = None # delta heads, the new heads on branch
348 352 if not repo.obsstore:
349 353 discardedheads = set()
350 354 newhs = candidate_newhs
351 355 else:
352 356 newhs, discardedheads = _postprocessobsolete(pushop,
353 357 allfuturecommon,
354 358 candidate_newhs)
355 359 unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
356 360 if unsynced:
357 361 if None in unsynced:
358 362 # old remote, no heads data
359 363 heads = None
360 364 elif len(unsynced) <= 4 or repo.ui.verbose:
361 365 heads = ' '.join(short(h) for h in unsynced)
362 366 else:
363 367 heads = (' '.join(short(h) for h in unsynced[:4]) +
364 368 ' ' + _("and %s others") % (len(unsynced) - 4))
365 369 if heads is None:
366 370 repo.ui.status(_("remote has heads that are "
367 371 "not known locally\n"))
368 372 elif branch is None:
369 373 repo.ui.status(_("remote has heads that are "
370 374 "not known locally: %s\n") % heads)
371 375 else:
372 376 repo.ui.status(_("remote has heads on branch '%s' that are "
373 377 "not known locally: %s\n") % (branch, heads))
374 378 if remoteheads is None:
375 379 if len(newhs) > 1:
376 380 dhs = list(newhs)
377 381 if errormsg is None:
378 382 errormsg = (_("push creates new branch '%s' "
379 383 "with multiple heads") % (branch))
380 384 hint = _("merge or"
381 385 " see 'hg help push' for details about"
382 386 " pushing new heads")
383 387 elif len(newhs) > len(oldhs):
384 388 # remove bookmarked or existing remote heads from the new heads list
385 389 dhs = sorted(newhs - nowarnheads - oldhs)
386 390 if dhs:
387 391 if errormsg is None:
388 392 if branch not in ('default', None):
389 393 errormsg = _("push creates new remote head %s "
390 394 "on branch '%s'!") % (short(dhs[0]), branch)
391 395 elif repo[dhs[0]].bookmarks():
392 396 errormsg = _("push creates new remote head %s "
393 397 "with bookmark '%s'!") % (
394 398 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
395 399 else:
396 400 errormsg = _("push creates new remote head %s!"
397 401 ) % short(dhs[0])
398 402 if unsyncedheads:
399 403 hint = _("pull and merge or"
400 404 " see 'hg help push' for details about"
401 405 " pushing new heads")
402 406 else:
403 407 hint = _("merge or"
404 408 " see 'hg help push' for details about"
405 409 " pushing new heads")
406 410 if branch is None:
407 411 repo.ui.note(_("new remote heads:\n"))
408 412 else:
409 413 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
410 414 for h in dhs:
411 415 repo.ui.note((" %s\n") % short(h))
412 416 if errormsg:
413 417 raise error.Abort(errormsg, hint=hint)
414 418
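
The heart of the per-branch check above is simple set arithmetic: if the push leaves a branch with more heads than before, the offending heads are the new ones minus the old ones and minus the exempt (bookmarked) ones. A condensed toy sketch, with strings standing in for node ids:

def extra_heads(oldheads, newheads, nowarn):
    oldhs, newhs = set(oldheads), set(newheads)
    if len(newhs) > len(oldhs):
        return sorted(newhs - nowarn - oldhs)  # the heads the push creates
    return []

print(extra_heads(['h1'], ['h1', 'h2', 'h3'], nowarn={'h3'}))  # ['h2']
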
415 419 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
416 420 """post process the list of new heads with obsolescence information
417 421
418 422 Exists as a sub-function to contain the complexity and allow extensions to
419 423 experiment with smarter logic.
420 424
421 425 Returns (newheads, discarded_heads) tuple
422 426 """
423 427 # known issue
424 428 #
425 429 # * We "silently" skip processing on all changesets unknown locally
426 430 #
427 431 # * if <nh> is public on the remote, it won't be affected by obsolete
428 432 # markers and a new head is created
429 433
430 434 # define various utilities and containers
431 435 repo = pushop.repo
432 436 unfi = repo.unfiltered()
433 437 tonode = unfi.changelog.node
434 438 torev = unfi.changelog.rev
435 439 public = phases.public
436 440 getphase = unfi._phasecache.phase
437 441 ispublic = (lambda r: getphase(unfi, r) == public)
438 442 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
439 443 futurecommon)
440 444 successorsmarkers = unfi.obsstore.successors
441 445 newhs = set() # final set of new heads
442 446 discarded = set() # new head of fully replaced branch
443 447
444 448 localcandidate = set() # candidate heads known locally
445 449 unknownheads = set() # candidate heads unknown locally
446 450 for h in candidate_newhs:
447 451 if h in unfi:
448 452 localcandidate.add(h)
449 453 else:
450 454 if successorsmarkers.get(h) is not None:
451 455 msg = ('checkheads: remote head unknown locally has'
452 456 ' local marker: %s\n')
453 457 repo.ui.debug(msg % hex(h))
454 458 unknownheads.add(h)
455 459
456 460 # fast path the simple case
457 461 if len(localcandidate) == 1:
458 462 return unknownheads | set(candidate_newhs), set()
459 463
460 464 # actually process branch replacement
461 465 while localcandidate:
462 466 nh = localcandidate.pop()
463 467 # run this check early to skip the evaluation of the whole branch
464 468 if (nh in futurecommon or ispublic(torev(nh))):
465 469 newhs.add(nh)
466 470 continue
467 471
468 472 # Get all revs/nodes on the branch exclusive to this head
469 473 # (already filtered heads are "ignored")
470 474 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
471 475 nh, localcandidate, newhs)
472 476 branchnodes = [tonode(r) for r in branchrevs]
473 477
474 478 # The branch won't be hidden on the remote if
475 479 # * any part of it is public,
476 480 # * any part of it is considered part of the result by previous logic,
477 481 # * if we have no markers to push to obsolete it.
478 482 if (any(ispublic(r) for r in branchrevs)
479 483 or any(n in futurecommon for n in branchnodes)
480 484 or any(not hasoutmarker(n) for n in branchnodes)):
481 485 newhs.add(nh)
482 486 else:
483 487 # note: there is a corner case if there is a merge in the branch.
484 488 # we might end up with -more- heads. However, these heads are not
485 489 # "added" by the push, but more by the "removal" on the remote, so I
486 490 # think it is okay to ignore them.
487 491 discarded.add(nh)
488 492 newhs |= unknownheads
489 493 return newhs, discarded
490 494
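
The keep-or-discard decision above reduces to a three-way predicate over the changesets exclusive to a candidate head. A standalone sketch with hypothetical callables (`is_public` for the phase check, `has_out_marker` for hasoutmarker):

def keep_head(branchnodes, is_public, futurecommon, has_out_marker):
    # a candidate head survives if any changeset exclusive to its branch
    # is public, stays common after the push, or has no marker to push
    return (any(is_public(n) for n in branchnodes)
            or any(n in futurecommon for n in branchnodes)
            or any(not has_out_marker(n) for n in branchnodes))

# a fully obsoleted draft branch whose markers are all pushed is discarded:
print(keep_head(['a', 'b'], lambda n: False, set(), lambda n: True))   # False
# one public changeset anywhere on the branch keeps the head:
print(keep_head(['a', 'b'], lambda n: n == 'a', set(), lambda n: True))  # True
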
491 495 def pushingmarkerfor(obsstore, pushset, node):
492 496 """true if some markers are to be pushed for node
493 497
494 498 We cannot just look into the pushed obsmarkers from the pushop because
495 499 discovery might have filtered relevant markers. In addition, listing all
496 500 markers relevant to all changesets in the pushed set would be too expensive
497 501 (O(len(repo)))
498 502
499 503 (note: there are caching opportunities in this function, but they would
500 504 require a two-dimensional stack.)
501 505 """
502 506 successorsmarkers = obsstore.successors
503 507 stack = [node]
504 508 seen = set(stack)
505 509 while stack:
506 510 current = stack.pop()
507 511 if current in pushset:
508 512 return True
509 513 markers = successorsmarkers.get(current, ())
510 514 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
511 515 for m in markers:
512 516 nexts = m[1] # successors
513 517 if not nexts: # this is a prune marker
514 518 nexts = m[5] or () # parents
515 519 for n in nexts:
516 520 if n not in seen:
517 521 seen.add(n)
518 522 stack.append(n)
519 523 return False
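
pushingmarkerfor is a depth-first walk over the successor-marker graph. A self-contained toy version, where `successors` maps node -> list of (succs, parents) pairs as a simplified stand-in for obsstore.successors:

def pushing_marker_for(successors, pushset, node):
    stack, seen = [node], {node}
    while stack:
        current = stack.pop()
        if current in pushset:
            return True  # a pushed marker chain reaches the pushed set
        for succs, parents in successors.get(current, ()):
            nexts = succs or parents or ()  # prune markers fall back to parents
            for n in nexts:
                if n not in seen:
                    seen.add(n)
                    stack.append(n)
    return False

markers = {'old': [(['new'], [])]}
print(pushing_marker_for(markers, {'new'}, 'old'))  # True: marker reaches pushed node
print(pushing_marker_for({}, {'new'}, 'other'))     # False: no marker chain
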