discovery: re-wrap expression to avoid a black bug...
Augie Fackler
r43319:6e8582cc default
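The change itself re-wraps a single expression in checkheads() so that the
black code formatter can handle it; the commit message attributes the need
for this to a black bug without citing a specific issue. Before and after,
as taken from the diff below:

    # before: format string split across two lines inside the _() call
    errormsg = (_("push creates new branch '%s' "
                  "with multiple heads") % (branch))

    # after: the outer parentheses carry the wrapping instead
    errormsg = (
        _("push creates new branch '%s' with multiple heads") %
        branch
    )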
@@ -1,538 +1,540 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 scmutil,
25 25 setdiscovery,
26 26 treediscovery,
27 27 util,
28 28 )
29 29
30 30 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
31 31 """Return a tuple (common, anyincoming, heads) used to identify the common
32 32 subset of nodes between repo and remote.
33 33
34 34 "common" is a list of (at least) the heads of the common subset.
35 35 "anyincoming" is testable as a boolean indicating if any nodes are missing
36 36 locally. If remote does not support getbundle, this actually is a list of
37 37 roots of the nodes that would be incoming, to be supplied to
38 38 changegroupsubset. No code except for pull should be relying on this fact
39 39 any longer.
40 40 "heads" is either the supplied heads, or else the remote's heads.
41 41 "ancestorsof" if not None, restrict the discovery to a subset defined by
42 42 these nodes. Changeset outside of this set won't be considered (and
43 43 won't appears in "common")
44 44
45 45 If you pass heads and they are all known locally, the response lists just
46 46 these heads in "common" and in "heads".
47 47
48 48 Please use findcommonoutgoing to compute the set of outgoing nodes to give
49 49 extensions a good hook into outgoing.
50 50 """
51 51
52 52 if not remote.capable('getbundle'):
53 53 return treediscovery.findcommonincoming(repo, remote, heads, force)
54 54
55 55 if heads:
56 56 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
57 57 if all(knownnode(h) for h in heads):
58 58 return (heads, False, heads)
59 59
60 60 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
61 61 abortwhenunrelated=not force,
62 62 ancestorsof=ancestorsof)
63 63 common, anyinc, srvheads = res
64 64 return (list(common), anyinc, heads or list(srvheads))
65 65
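# Usage sketch (illustrative only; "repo" and "remote" stand for a local
# repository and a getbundle-capable peer):
#
#     common, anyinc, rheads = findcommonincoming(repo, remote)
#     if anyinc:
#         pass  # a pull would now fetch what lies between common and rheads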
66 66 class outgoing(object):
67 67 '''Represents the set of nodes present in a local repo but not in a
68 68 (possibly) remote one.
69 69
70 70 Members:
71 71
72 72 missing is a list of all nodes present in local but not in remote.
73 73 common is a list of all nodes shared between the two repos.
74 74 excluded is the list of missing changesets that shouldn't be sent remotely.
75 75 missingheads is the list of heads of missing.
76 76 commonheads is the list of heads of common.
77 77
78 78 The sets are computed on demand from the heads, unless provided upfront
79 79 by discovery.'''
80 80
81 81 def __init__(self, repo, commonheads=None, missingheads=None,
82 82 missingroots=None):
83 83 # at least one of them must not be set
84 84 assert None in (commonheads, missingroots)
85 85 cl = repo.changelog
86 86 if missingheads is None:
87 87 missingheads = cl.heads()
88 88 if missingroots:
89 89 discbases = []
90 90 for n in missingroots:
91 91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 92 # TODO remove call to nodesbetween.
93 93 # TODO populate attributes on outgoing instance instead of setting
94 94 # discbases.
95 95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 96 included = set(csets)
97 97 missingheads = heads
98 98 commonheads = [n for n in discbases if n not in included]
99 99 elif not commonheads:
100 100 commonheads = [nullid]
101 101 self.commonheads = commonheads
102 102 self.missingheads = missingheads
103 103 self._revlog = cl
104 104 self._common = None
105 105 self._missing = None
106 106 self.excluded = []
107 107
108 108 def _computecommonmissing(self):
109 109 sets = self._revlog.findcommonmissing(self.commonheads,
110 110 self.missingheads)
111 111 self._common, self._missing = sets
112 112
113 113 @util.propertycache
114 114 def common(self):
115 115 if self._common is None:
116 116 self._computecommonmissing()
117 117 return self._common
118 118
119 119 @util.propertycache
120 120 def missing(self):
121 121 if self._missing is None:
122 122 self._computecommonmissing()
123 123 return self._missing
124 124
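# Construction sketch (illustrative nodes h1, h2, r1): per the assertion in
# __init__, commonheads and missingroots must not both be supplied:
#
#     og = outgoing(repo, commonheads=[h1], missingheads=[h2])  # from heads
#     og = outgoing(repo, missingroots=[r1])                    # from roots
#     og.missing  # either way, computed lazily on first access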
125 125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 126 commoninc=None, portable=False):
127 127 '''Return an outgoing instance to identify the nodes present in repo but
128 128 not in other.
129 129
130 130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 131 (inclusive) are included. If you already know the local repo's heads,
132 132 passing them in onlyheads is faster than letting them be recomputed here.
133 133
134 134 If commoninc is given, it must be the result of a prior call to
135 135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136 136
137 137 If portable is given, compute more conservative common and missingheads,
138 138 to make bundles created from the instance more portable.'''
139 139 # declare an empty outgoing object to be filled later
140 140 og = outgoing(repo, None, None)
141 141
142 142 # get common set if not provided
143 143 if commoninc is None:
144 144 commoninc = findcommonincoming(repo, other, force=force,
145 145 ancestorsof=onlyheads)
146 146 og.commonheads, _any, _hds = commoninc
147 147
148 148 # compute outgoing
149 149 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
150 150 if not mayexclude:
151 151 og.missingheads = onlyheads or repo.heads()
152 152 elif onlyheads is None:
153 153 # use visible heads as it should be cached
154 154 og.missingheads = repo.filtered("served").heads()
155 155 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
156 156 else:
157 157 # compute common, missing and exclude secret stuff
158 158 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
159 159 og._common, allmissing = sets
160 160 og._missing = missing = []
161 161 og.excluded = excluded = []
162 162 for node in allmissing:
163 163 ctx = repo[node]
164 164 if ctx.phase() >= phases.secret or ctx.extinct():
165 165 excluded.append(node)
166 166 else:
167 167 missing.append(node)
168 168 if len(missing) == len(allmissing):
169 169 missingheads = onlyheads
170 170 else: # update missing heads
171 171 missingheads = phases.newheads(repo, onlyheads, excluded)
172 172 og.missingheads = missingheads
173 173 if portable:
174 174 # recompute common and missingheads as if -r<rev> had been given for
175 175 # each head of missing, and --base <rev> for each head of the proper
176 176 # ancestors of missing
177 177 og._computecommonmissing()
178 178 cl = repo.changelog
179 179 missingrevs = set(cl.rev(n) for n in og._missing)
180 180 og._common = set(cl.ancestors(missingrevs)) - missingrevs
181 181 commonheads = set(og.commonheads)
182 182 og.missingheads = [h for h in og.missingheads if h not in commonheads]
183 183
184 184 return og
185 185
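# Push-side usage sketch (illustrative names):
#
#     commoninc = findcommonincoming(repo, other, force=False)
#     og = findcommonoutgoing(repo, other, commoninc=commoninc)
#     og.missing   # changesets a push would send to "other"
#     og.excluded  # secret/extinct changesets withheld from the bundle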
186 186 def _headssummary(pushop):
187 187 """compute a summary of branch and heads status before and after push
188 188
189 189 return {'branch': ([remoteheads], [newheads],
190 190 [unsyncedheads], [discardedheads])} mapping
191 191
192 192 - branch: the branch name,
193 193 - remoteheads: the list of remote heads known locally
194 194 None if the branch is new,
195 195 - newheads: the new remote heads (known locally) with outgoing pushed,
196 196 - unsyncedheads: the list of remote heads unknown locally,
197 197 - discardedheads: the list of heads made obsolete by the push.
198 198 """
199 199 repo = pushop.repo.unfiltered()
200 200 remote = pushop.remote
201 201 outgoing = pushop.outgoing
202 202 cl = repo.changelog
203 203 headssum = {}
204 204 missingctx = set()
205 205 # A. Create set of branches involved in the push.
206 206 branches = set()
207 207 for n in outgoing.missing:
208 208 ctx = repo[n]
209 209 missingctx.add(ctx)
210 210 branches.add(ctx.branch())
211 211
212 212 with remote.commandexecutor() as e:
213 213 remotemap = e.callcommand('branchmap', {}).result()
214 214
215 215 knownnode = cl.hasnode # do not use nodemap until it is filtered
216 216 # B. register remote heads of branches which are in outgoing set
217 217 for branch, heads in remotemap.iteritems():
218 218 # don't add head info about branches which we don't have locally
219 219 if branch not in branches:
220 220 continue
221 221 known = []
222 222 unsynced = []
223 223 for h in heads:
224 224 if knownnode(h):
225 225 known.append(h)
226 226 else:
227 227 unsynced.append(h)
228 228 headssum[branch] = (known, list(known), unsynced)
229 229
230 230 # C. add new branch data
231 231 for branch in branches:
232 232 if branch not in headssum:
233 233 headssum[branch] = (None, [], [])
234 234
235 235 # D. Update newmap with outgoing changes.
236 236 # This will possibly add new heads and remove existing ones.
237 237 newmap = branchmap.remotebranchcache((branch, heads[1])
238 238 for branch, heads in headssum.iteritems()
239 239 if heads[0] is not None)
240 240 newmap.update(repo, (ctx.rev() for ctx in missingctx))
241 241 for branch, newheads in newmap.iteritems():
242 242 headssum[branch][1][:] = newheads
243 243 for branch, items in headssum.iteritems():
244 244 for l in items:
245 245 if l is not None:
246 246 l.sort()
247 247 headssum[branch] = items + ([],)
248 248
249 249 # If there is no obsstore, no post processing is needed.
250 250 if repo.obsstore:
251 251 torev = repo.changelog.rev
252 252 futureheads = set(torev(h) for h in outgoing.missingheads)
253 253 futureheads |= set(torev(h) for h in outgoing.commonheads)
254 254 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
255 255 for branch, heads in sorted(headssum.iteritems()):
256 256 remoteheads, newheads, unsyncedheads, placeholder = heads
257 257 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
258 258 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
259 259 sorted(result[1]))
260 260 return headssum
261 261
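# Shape of the returned mapping (fabricated example values):
#
#     {'default': ([rh1], [rh1, nh1], [], []),  # push adds head nh1
#      'topic':   (None,  [nh2],      [], [])}  # branch new to the remote
#
# rh* are remote heads known locally, nh* are heads after the push.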
262 262 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
263 263 """Compute branchmapsummary for repo without branchmap support"""
264 264
265 265 # 1-4b. old servers: Check for new topological heads.
266 266 # Construct {old,new}map with branch = None (topological branch).
267 267 # (code based on update)
268 268 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
269 269 oldheads = sorted(h for h in remoteheads if knownnode(h))
270 270 # all nodes in outgoing.missing are children of either:
271 271 # - an element of oldheads
272 272 # - another element of outgoing.missing
273 273 # - nullrev
274 274 # This explains why the new heads are very simple to compute.
275 275 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
276 276 newheads = sorted(c.node() for c in r)
277 277 # set some unsynced head to issue the "unsynced changes" warning
278 278 if inc:
279 279 unsynced = [None]
280 280 else:
281 281 unsynced = []
282 282 return {None: (oldheads, newheads, unsynced, [])}
283 283
284 284 def _nowarnheads(pushop):
285 285 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
286 286 repo = pushop.repo.unfiltered()
287 287 remote = pushop.remote
288 288 localbookmarks = repo._bookmarks
289 289
290 290 with remote.commandexecutor() as e:
291 291 remotebookmarks = e.callcommand('listkeys', {
292 292 'namespace': 'bookmarks',
293 293 }).result()
294 294
295 295 bookmarkedheads = set()
296 296
297 297 # internal config: bookmarks.pushing
298 298 newbookmarks = [localbookmarks.expandname(b)
299 299 for b in pushop.ui.configlist('bookmarks', 'pushing')]
300 300
301 301 for bm in localbookmarks:
302 302 rnode = remotebookmarks.get(bm)
303 303 if rnode and rnode in repo:
304 304 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
305 305 if bookmarks.validdest(repo, rctx, lctx):
306 306 bookmarkedheads.add(lctx.node())
307 307 else:
308 308 if bm in newbookmarks and bm not in remotebookmarks:
309 309 bookmarkedheads.add(localbookmarks[bm])
310 310
311 311 return bookmarkedheads
312 312
313 313 def checkheads(pushop):
314 314 """Check that a push won't add any outgoing head
315 315
316 316 Raise an Abort error and display a ui message as needed.
317 317 """
318 318
319 319 repo = pushop.repo.unfiltered()
320 320 remote = pushop.remote
321 321 outgoing = pushop.outgoing
322 322 remoteheads = pushop.remoteheads
323 323 newbranch = pushop.newbranch
324 324 inc = bool(pushop.incoming)
325 325
326 326 # Check for each named branch if we're creating new remote heads.
327 327 # To be a remote head after push, node must be either:
328 328 # - unknown locally
329 329 # - a local outgoing head descended from update
330 330 # - a remote head that's known locally and not
331 331 # ancestral to an outgoing head
332 332 if remoteheads == [nullid]:
333 333 # remote is empty, nothing to check.
334 334 return
335 335
336 336 if remote.capable('branchmap'):
337 337 headssum = _headssummary(pushop)
338 338 else:
339 339 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
340 340 pushop.pushbranchmap = headssum
341 341 newbranches = [branch for branch, heads in headssum.iteritems()
342 342 if heads[0] is None]
343 343 # 1. Check for new branches on the remote.
344 344 if newbranches and not newbranch: # new branch requires --new-branch
345 345 branchnames = ', '.join(sorted(newbranches))
346 346 # Calculate how many of the new branches are closed branches
347 347 closedbranches = set()
348 348 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
349 349 if isclosed:
350 350 closedbranches.add(tag)
351 351 closedbranches = (closedbranches & set(newbranches))
352 352 if closedbranches:
353 353 errmsg = (_("push creates new remote branches: %s (%d closed)!")
354 354 % (branchnames, len(closedbranches)))
355 355 else:
356 356 errmsg = (_("push creates new remote branches: %s!")% branchnames)
357 357 hint=_("use 'hg push --new-branch' to create new remote branches")
358 358 raise error.Abort(errmsg, hint=hint)
359 359
360 360 # 2. Find heads that we need not warn about
361 361 nowarnheads = _nowarnheads(pushop)
362 362
363 363 # 3. Check for new heads.
364 364 # If there are more heads after the push than before, a suitable
365 365 # error message, depending on unsynced status, is displayed.
366 366 errormsg = None
367 367 for branch, heads in sorted(headssum.iteritems()):
368 368 remoteheads, newheads, unsyncedheads, discardedheads = heads
369 369 # add unsynced data
370 370 if remoteheads is None:
371 371 oldhs = set()
372 372 else:
373 373 oldhs = set(remoteheads)
374 374 oldhs.update(unsyncedheads)
375 375 dhs = None # delta heads, the new heads on branch
376 376 newhs = set(newheads)
377 377 newhs.update(unsyncedheads)
378 378 if unsyncedheads:
379 379 if None in unsyncedheads:
380 380 # old remote, no heads data
381 381 heads = None
382 382 else:
383 383 heads = scmutil.nodesummaries(repo, unsyncedheads)
384 384 if heads is None:
385 385 repo.ui.status(_("remote has heads that are "
386 386 "not known locally\n"))
387 387 elif branch is None:
388 388 repo.ui.status(_("remote has heads that are "
389 389 "not known locally: %s\n") % heads)
390 390 else:
391 391 repo.ui.status(_("remote has heads on branch '%s' that are "
392 392 "not known locally: %s\n") % (branch, heads))
393 393 if remoteheads is None:
394 394 if len(newhs) > 1:
395 395 dhs = list(newhs)
396 396 if errormsg is None:
397 errormsg = (_("push creates new branch '%s' "
398 "with multiple heads") % (branch))
397 errormsg = (
398 _("push creates new branch '%s' with multiple heads") %
399 branch
400 )
399 401 hint = _("merge or"
400 402 " see 'hg help push' for details about"
401 403 " pushing new heads")
402 404 elif len(newhs) > len(oldhs):
403 405 # remove bookmarked or existing remote heads from the new heads list
404 406 dhs = sorted(newhs - nowarnheads - oldhs)
405 407 if dhs:
406 408 if errormsg is None:
407 409 if branch not in ('default', None):
408 410 errormsg = _("push creates new remote head %s "
409 411 "on branch '%s'!") % (short(dhs[0]), branch)
410 412 elif repo[dhs[0]].bookmarks():
411 413 errormsg = _("push creates new remote head %s "
412 414 "with bookmark '%s'!") % (
413 415 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
414 416 else:
415 417 errormsg = _("push creates new remote head %s!"
416 418 ) % short(dhs[0])
417 419 if unsyncedheads:
418 420 hint = _("pull and merge or"
419 421 " see 'hg help push' for details about"
420 422 " pushing new heads")
421 423 else:
422 424 hint = _("merge or"
423 425 " see 'hg help push' for details about"
424 426 " pushing new heads")
425 427 if branch is None:
426 428 repo.ui.note(_("new remote heads:\n"))
427 429 else:
428 430 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
429 431 for h in dhs:
430 432 repo.ui.note((" %s\n") % short(h))
431 433 if errormsg:
432 434 raise error.Abort(errormsg, hint=hint)
433 435
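# Failure sketch, using the messages constructed above (hash and branch
# name fabricated):
#
#     abort: push creates new remote head c3d4e5f6a7b8 on branch 'stable'!
#     (merge or see 'hg help push' for details about pushing new heads)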
434 436 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
435 437 """post process the list of new heads with obsolescence information
436 438
437 439 Exists as a sub-function to contain the complexity and allow extensions to
438 440 experiment with smarter logic.
439 441
440 442 Returns (newheads, discarded_heads) tuple
441 443 """
442 444 # known issue
443 445 #
444 446 # * We "silently" skip processing on all changesets unknown locally
445 447 #
446 448 # * if <nh> is public on the remote, it won't be affected by obsolescence
447 449 # markers and a new head is created
448 450
449 451 # define various utilities and containers
450 452 repo = pushop.repo
451 453 unfi = repo.unfiltered()
452 454 tonode = unfi.changelog.node
453 455 torev = unfi.changelog.nodemap.get
454 456 public = phases.public
455 457 getphase = unfi._phasecache.phase
456 458 ispublic = (lambda r: getphase(unfi, r) == public)
457 459 ispushed = (lambda n: torev(n) in futurecommon)
458 460 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
459 461 successorsmarkers = unfi.obsstore.successors
460 462 newhs = set() # final set of new heads
461 463 discarded = set() # new head of fully replaced branch
462 464
463 465 localcandidate = set() # candidate heads known locally
464 466 unknownheads = set() # candidate heads unknown locally
465 467 for h in candidate_newhs:
466 468 if h in unfi:
467 469 localcandidate.add(h)
468 470 else:
469 471 if successorsmarkers.get(h) is not None:
470 472 msg = ('checkheads: remote head unknown locally has'
471 473 ' local marker: %s\n')
472 474 repo.ui.debug(msg % hex(h))
473 475 unknownheads.add(h)
474 476
475 477 # fast path the simple case
476 478 if len(localcandidate) == 1:
477 479 return unknownheads | set(candidate_newhs), set()
478 480
479 481 # actually process branch replacement
480 482 while localcandidate:
481 483 nh = localcandidate.pop()
482 484 # run this check early to skip the evaluation of the whole branch
483 485 if (torev(nh) in futurecommon or ispublic(torev(nh))):
484 486 newhs.add(nh)
485 487 continue
486 488
487 489 # Get all revs/nodes on the branch exclusive to this head
488 490 # (already filtered heads are "ignored")
489 491 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
490 492 nh, localcandidate, newhs)
491 493 branchnodes = [tonode(r) for r in branchrevs]
492 494
493 495 # The branch won't be hidden on the remote if
494 496 # * any part of it is public,
495 497 # * any part of it is considered part of the result by previous logic,
496 498 # * we have no markers to push to obsolete it.
497 499 if (any(ispublic(r) for r in branchrevs)
498 500 or any(torev(n) in futurecommon for n in branchnodes)
499 501 or any(not hasoutmarker(n) for n in branchnodes)):
500 502 newhs.add(nh)
501 503 else:
502 504 # note: there is a corner case if there is a merge in the branch.
503 505 # we might end up with -more- heads. However, these heads are not
504 506 # "added" by the push, but more by the "removal" on the remote so I
505 507 # think is a okay to ignore them,
506 508 discarded.add(nh)
507 509 newhs |= unknownheads
508 510 return newhs, discarded
509 511
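# Result sketch (hypothetical candidate heads A and B): if every changeset
# exclusive to B carries an outgoing obsolescence marker while A's branch
# does not, the function returns ({A, ...}, {B}) -- B is discarded as the
# head of a fully replaced branch instead of triggering a new-head warning.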
510 512 def pushingmarkerfor(obsstore, ispushed, node):
511 513 """true if some markers are to be pushed for node
512 514
513 515 We cannot just look into the pushed obsmarkers from the pushop because
514 516 discovery might have filtered relevant markers. In addition, listing all
515 517 markers relevant to all changesets in the pushed set would be too expensive
516 518 (O(len(repo))).
517 519
518 520 (note: there are caching opportunities in this function, but they would
519 521 require a two-dimensional stack.)
520 522 """
521 523 successorsmarkers = obsstore.successors
522 524 stack = [node]
523 525 seen = set(stack)
524 526 while stack:
525 527 current = stack.pop()
526 528 if ispushed(current):
527 529 return True
528 530 markers = successorsmarkers.get(current, ())
529 531 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
530 532 for m in markers:
531 533 nexts = m[1] # successors
532 534 if not nexts: # this is a prune marker
533 535 nexts = m[5] or () # parents
534 536 for n in nexts:
535 537 if n not in seen:
536 538 seen.add(n)
537 539 stack.append(n)
538 540 return False