headssummary: directly feed the function with the 'pushop' object...
marmoute - r32706:993f58db default
@@ -1,519 +1,522 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 setdiscovery,
25 25 treediscovery,
26 26 util,
27 27 )
28 28
29 29 def findcommonincoming(repo, remote, heads=None, force=False):
30 30 """Return a tuple (common, anyincoming, heads) used to identify the common
31 31 subset of nodes between repo and remote.
32 32
33 33 "common" is a list of (at least) the heads of the common subset.
34 34 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 35 locally. If remote does not support getbundle, this actually is a list of
36 36 roots of the nodes that would be incoming, to be supplied to
37 37 changegroupsubset. No code except for pull should be relying on this fact
38 38 any longer.
39 39 "heads" is either the supplied heads, or else the remote's heads.
40 40
41 41 If you pass heads and they are all known locally, the response lists just
42 42 these heads in "common" and in "heads".
43 43
44 44 Please use findcommonoutgoing to compute the set of outgoing nodes to give
45 45 extensions a good hook into outgoing.
46 46 """
47 47
48 48 if not remote.capable('getbundle'):
49 49 return treediscovery.findcommonincoming(repo, remote, heads, force)
50 50
51 51 if heads:
52 52 allknown = True
53 53 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
54 54 for h in heads:
55 55 if not knownnode(h):
56 56 allknown = False
57 57 break
58 58 if allknown:
59 59 return (heads, False, heads)
60 60
61 61 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
62 62 abortwhenunrelated=not force)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74 excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force)
145 145 og.commonheads, _any, _hds = commoninc
146 146
147 147 # compute outgoing
148 148 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 149 if not mayexclude:
150 150 og.missingheads = onlyheads or repo.heads()
151 151 elif onlyheads is None:
152 152 # use visible heads as it should be cached
153 153 og.missingheads = repo.filtered("served").heads()
154 154 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 155 else:
156 156 # compute common, missing and exclude secret stuff
157 157 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 158 og._common, allmissing = sets
159 159 og._missing = missing = []
160 160 og.excluded = excluded = []
161 161 for node in allmissing:
162 162 ctx = repo[node]
163 163 if ctx.phase() >= phases.secret or ctx.extinct():
164 164 excluded.append(node)
165 165 else:
166 166 missing.append(node)
167 167 if len(missing) == len(allmissing):
168 168 missingheads = onlyheads
169 169 else: # update missing heads
170 170 missingheads = phases.newheads(repo, onlyheads, excluded)
171 171 og.missingheads = missingheads
172 172 if portable:
173 173 # recompute common and missingheads as if -r<rev> had been given for
174 174 # each head of missing, and --base <rev> for each head of the proper
175 175 # ancestors of missing
176 176 og._computecommonmissing()
177 177 cl = repo.changelog
178 178 missingrevs = set(cl.rev(n) for n in og._missing)
179 179 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 180 commonheads = set(og.commonheads)
181 181 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 182
183 183 return og
184 184
185 def _headssummary(repo, remote, outgoing):
185 def _headssummary(pushop):
186 186 """compute a summary of branch and heads status before and after push
187 187
188 188 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
189 189
190 190 - branch: the branch name
191 191 - remoteheads: the list of remote heads known locally
192 192 None if the branch is new
193 193 - newheads: the new remote heads (known locally) with outgoing pushed
194 194 - unsyncedheads: the list of remote heads unknown locally.
195 195 """
196 repo = pushop.repo.unfiltered()
197 remote = pushop.remote
198 outgoing = pushop.outgoing
196 199 cl = repo.changelog
197 200 headssum = {}
198 201 # A. Create set of branches involved in the push.
199 202 branches = set(repo[n].branch() for n in outgoing.missing)
200 203 remotemap = remote.branchmap()
201 204 newbranches = branches - set(remotemap)
202 205 branches.difference_update(newbranches)
203 206
204 207 # B. register remote heads
205 208 remotebranches = set()
206 209 for branch, heads in remote.branchmap().iteritems():
207 210 remotebranches.add(branch)
208 211 known = []
209 212 unsynced = []
210 213 knownnode = cl.hasnode # do not use nodemap until it is filtered
211 214 for h in heads:
212 215 if knownnode(h):
213 216 known.append(h)
214 217 else:
215 218 unsynced.append(h)
216 219 headssum[branch] = (known, list(known), unsynced)
217 220 # C. add new branch data
218 221 missingctx = list(repo[n] for n in outgoing.missing)
219 222 touchedbranches = set()
220 223 for ctx in missingctx:
221 224 branch = ctx.branch()
222 225 touchedbranches.add(branch)
223 226 if branch not in headssum:
224 227 headssum[branch] = (None, [], [])
225 228
226 229 # D. drop data about untouched branches:
227 230 for branch in remotebranches - touchedbranches:
228 231 del headssum[branch]
229 232
230 233 # E. Update newmap with outgoing changes.
231 234 # This will possibly add new heads and remove existing ones.
232 235 newmap = branchmap.branchcache((branch, heads[1])
233 236 for branch, heads in headssum.iteritems()
234 237 if heads[0] is not None)
235 238 newmap.update(repo, (ctx.rev() for ctx in missingctx))
236 239 for branch, newheads in newmap.iteritems():
237 240 headssum[branch][1][:] = newheads
238 241 for branch, items in headssum.iteritems():
239 242 for l in items:
240 243 if l is not None:
241 244 l.sort()
242 245 return headssum
243 246
244 247 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
245 248 """Compute branchmapsummary for repo without branchmap support"""
246 249
247 250 # 1-4b. old servers: Check for new topological heads.
248 251 # Construct {old,new}map with branch = None (topological branch).
249 252 # (code based on update)
250 253 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
251 254 oldheads = sorted(h for h in remoteheads if knownnode(h))
252 255 # all nodes in outgoing.missing are children of either:
253 256 # - an element of oldheads
254 257 # - another element of outgoing.missing
255 258 # - nullrev
256 259 # This explains why the new heads are very simple to compute.
257 260 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
258 261 newheads = sorted(c.node() for c in r)
259 262 # set some unsynced head to issue the "unsynced changes" warning
260 263 if inc:
261 264 unsynced = [None]
262 265 else:
263 266 unsynced = []
264 267 return {None: (oldheads, newheads, unsynced)}
265 268
266 269 def _nowarnheads(pushop):
267 270 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
268 271 repo = pushop.repo.unfiltered()
269 272 remote = pushop.remote
270 273 localbookmarks = repo._bookmarks
271 274 remotebookmarks = remote.listkeys('bookmarks')
272 275 bookmarkedheads = set()
273 276
274 277 # internal config: bookmarks.pushing
275 278 newbookmarks = [localbookmarks.expandname(b)
276 279 for b in pushop.ui.configlist('bookmarks', 'pushing')]
277 280
278 281 for bm in localbookmarks:
279 282 rnode = remotebookmarks.get(bm)
280 283 if rnode and rnode in repo:
281 284 lctx, rctx = repo[bm], repo[rnode]
282 285 if bookmarks.validdest(repo, rctx, lctx):
283 286 bookmarkedheads.add(lctx.node())
284 287 else:
285 288 if bm in newbookmarks and bm not in remotebookmarks:
286 289 bookmarkedheads.add(repo[bm].node())
287 290
288 291 return bookmarkedheads
289 292
290 293 def checkheads(pushop):
291 294 """Check that a push won't add any outgoing head
292 295
293 296 raise Abort error and display ui message as needed.
294 297 """
295 298
296 299 repo = pushop.repo.unfiltered()
297 300 remote = pushop.remote
298 301 outgoing = pushop.outgoing
299 302 remoteheads = pushop.remoteheads
300 303 newbranch = pushop.newbranch
301 304 inc = bool(pushop.incoming)
302 305
303 306 # Check for each named branch if we're creating new remote heads.
304 307 # To be a remote head after push, node must be either:
305 308 # - unknown locally
306 309 # - a local outgoing head descended from update
307 310 # - a remote head that's known locally and not
308 311 # ancestral to an outgoing head
309 312 if remoteheads == [nullid]:
310 313 # remote is empty, nothing to check.
311 314 return
312 315
313 316 if remote.capable('branchmap'):
314 headssum = _headssummary(repo, remote, outgoing)
317 headssum = _headssummary(pushop)
315 318 else:
316 319 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
317 320 newbranches = [branch for branch, heads in headssum.iteritems()
318 321 if heads[0] is None]
319 322 # 1. Check for new branches on the remote.
320 323 if newbranches and not newbranch: # new branch requires --new-branch
321 324 branchnames = ', '.join(sorted(newbranches))
322 325 raise error.Abort(_("push creates new remote branches: %s!")
323 326 % branchnames,
324 327 hint=_("use 'hg push --new-branch' to create"
325 328 " new remote branches"))
326 329
327 330 # 2. Find heads that we need not warn about
328 331 nowarnheads = _nowarnheads(pushop)
329 332
330 333 # 3. Check for new heads.
331 334 # If there are more heads after the push than before, a suitable
332 335 # error message, depending on unsynced status, is displayed.
333 336 errormsg = None
334 337 # If there is no obsstore, no post-processing is needed.
335 338 if repo.obsstore:
336 339 allmissing = set(outgoing.missing)
337 340 cctx = repo.set('%ld', outgoing.common)
338 341 allfuturecommon = set(c.node() for c in cctx)
339 342 allfuturecommon.update(allmissing)
340 343 for branch, heads in sorted(headssum.iteritems()):
341 344 remoteheads, newheads, unsyncedheads = heads
342 345 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
343 346 newheads = sorted(result[0])
344 347 headssum[branch] = (remoteheads, newheads, unsyncedheads)
345 348 for branch, heads in sorted(headssum.iteritems()):
346 349 remoteheads, newheads, unsyncedheads = heads
347 350 # add unsynced data
348 351 if remoteheads is None:
349 352 oldhs = set()
350 353 else:
351 354 oldhs = set(remoteheads)
352 355 oldhs.update(unsyncedheads)
353 356 dhs = None # delta heads, the new heads on branch
354 357 newhs = set(newheads)
355 358 newhs.update(unsyncedheads)
356 359 if unsyncedheads:
357 360 if None in unsyncedheads:
358 361 # old remote, no heads data
359 362 heads = None
360 363 elif len(unsyncedheads) <= 4 or repo.ui.verbose:
361 364 heads = ' '.join(short(h) for h in unsyncedheads)
362 365 else:
363 366 heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
364 367 ' ' + _("and %s others") % (len(unsyncedheads) - 4))
365 368 if heads is None:
366 369 repo.ui.status(_("remote has heads that are "
367 370 "not known locally\n"))
368 371 elif branch is None:
369 372 repo.ui.status(_("remote has heads that are "
370 373 "not known locally: %s\n") % heads)
371 374 else:
372 375 repo.ui.status(_("remote has heads on branch '%s' that are "
373 376 "not known locally: %s\n") % (branch, heads))
374 377 if remoteheads is None:
375 378 if len(newhs) > 1:
376 379 dhs = list(newhs)
377 380 if errormsg is None:
378 381 errormsg = (_("push creates new branch '%s' "
379 382 "with multiple heads") % (branch))
380 383 hint = _("merge or"
381 384 " see 'hg help push' for details about"
382 385 " pushing new heads")
383 386 elif len(newhs) > len(oldhs):
384 387 # remove bookmarked or existing remote heads from the new heads list
385 388 dhs = sorted(newhs - nowarnheads - oldhs)
386 389 if dhs:
387 390 if errormsg is None:
388 391 if branch not in ('default', None):
389 392 errormsg = _("push creates new remote head %s "
390 393 "on branch '%s'!") % (short(dhs[0]), branch)
391 394 elif repo[dhs[0]].bookmarks():
392 395 errormsg = _("push creates new remote head %s "
393 396 "with bookmark '%s'!") % (
394 397 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
395 398 else:
396 399 errormsg = _("push creates new remote head %s!"
397 400 ) % short(dhs[0])
398 401 if unsyncedheads:
399 402 hint = _("pull and merge or"
400 403 " see 'hg help push' for details about"
401 404 " pushing new heads")
402 405 else:
403 406 hint = _("merge or"
404 407 " see 'hg help push' for details about"
405 408 " pushing new heads")
406 409 if branch is None:
407 410 repo.ui.note(_("new remote heads:\n"))
408 411 else:
409 412 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
410 413 for h in dhs:
411 414 repo.ui.note((" %s\n") % short(h))
412 415 if errormsg:
413 416 raise error.Abort(errormsg, hint=hint)
414 417
415 418 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
416 419 """post process the list of new heads with obsolescence information
417 420
418 421 Exists as a sub-function to contain the complexity and allow extensions to
419 422 experiment with smarter logic.
420 423
421 424 Returns (newheads, discarded_heads) tuple
422 425 """
423 426 # known issue
424 427 #
425 428 # * We "silently" skip processing on all changesets unknown locally
426 429 #
427 430 # * if <nh> is public on the remote, it won't be affected by obsolete
428 431 # markers and a new head is created
429 432
430 433 # define various utilities and containers
431 434 repo = pushop.repo
432 435 unfi = repo.unfiltered()
433 436 tonode = unfi.changelog.node
434 437 torev = unfi.changelog.rev
435 438 public = phases.public
436 439 getphase = unfi._phasecache.phase
437 440 ispublic = (lambda r: getphase(unfi, r) == public)
438 441 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
439 442 futurecommon)
440 443 successorsmarkers = unfi.obsstore.successors
441 444 newhs = set() # final set of new heads
442 445 discarded = set() # new head of fully replaced branch
443 446
444 447 localcandidate = set() # candidate heads known locally
445 448 unknownheads = set() # candidate heads unknown locally
446 449 for h in candidate_newhs:
447 450 if h in unfi:
448 451 localcandidate.add(h)
449 452 else:
450 453 if successorsmarkers.get(h) is not None:
451 454 msg = ('checkheads: remote head unknown locally has'
452 455 ' local marker: %s\n')
453 456 repo.ui.debug(msg % hex(h))
454 457 unknownheads.add(h)
455 458
456 459 # fast path the simple case
457 460 if len(localcandidate) == 1:
458 461 return unknownheads | set(candidate_newhs), set()
459 462
460 463 # actually process branch replacement
461 464 while localcandidate:
462 465 nh = localcandidate.pop()
463 466 # run this check early to skip the evaluation of the whole branch
464 467 if (nh in futurecommon or ispublic(torev(nh))):
465 468 newhs.add(nh)
466 469 continue
467 470
468 471 # Get all revs/nodes on the branch exclusive to this head
469 472 # (already filtered heads are "ignored")
470 473 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
471 474 nh, localcandidate, newhs)
472 475 branchnodes = [tonode(r) for r in branchrevs]
473 476
474 477 # The branch won't be hidden on the remote if
475 478 # * any part of it is public,
476 479 # * any part of it is considered part of the result by previous logic,
477 480 # * if we have no markers to push to obsolete it.
478 481 if (any(ispublic(r) for r in branchrevs)
479 482 or any(n in futurecommon for n in branchnodes)
480 483 or any(not hasoutmarker(n) for n in branchnodes)):
481 484 newhs.add(nh)
482 485 else:
483 486 # note: there is a corner case if there is a merge in the branch.
484 487 # we might end up with -more- heads. However, these heads are not
485 488 # "added" by the push, but more by the "removal" on the remote so I
486 489 # think is a okay to ignore them,
487 490 discarded.add(nh)
488 491 newhs |= unknownheads
489 492 return newhs, discarded
490 493
491 494 def pushingmarkerfor(obsstore, pushset, node):
492 495 """true if some markers are to be pushed for node
493 496
494 497 We cannot just look into the pushed obsmarkers from the pushop because
495 498 discovery might have filtered relevant markers. In addition, listing all
496 499 markers relevant to all changesets in the pushed set would be too expensive
497 500 (O(len(repo)))
498 501
499 502 (note: there are caching opportunities in this function, but they would
500 503 require a two-dimensional stack.)
501 504 """
502 505 successorsmarkers = obsstore.successors
503 506 stack = [node]
504 507 seen = set(stack)
505 508 while stack:
506 509 current = stack.pop()
507 510 if current in pushset:
508 511 return True
509 512 markers = successorsmarkers.get(current, ())
510 513 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
511 514 for m in markers:
512 515 nexts = m[1] # successors
513 516 if not nexts: # this is a prune marker
514 517 nexts = m[5] or () # parents
515 518 for n in nexts:
516 519 if n not in seen:
517 520 seen.add(n)
518 521 stack.append(n)
519 522 return False