@@ -466,7 +466,7 @@ def _rebundle(bundlerepo, bundleroots, u
 
     version = b'02'
     outgoing = discovery.outgoing(
-        bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
+        bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
     )
     cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
     cgstream = util.chunkbuffer(cgstream).read()

@@ -1711,7 +1711,7 @@ def _addpartsfromopts(ui, repo, bundler,
             b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
         )
     if opts.get(b'phases') and repo.revs(
-        b'%ln and secret()', outgoing.missingheads
+        b'%ln and secret()', outgoing.ancestorsof
     ):
         part.addparam(
             b'targetphase', b'%d' % phases.secret, mandatory=False

@@ -1753,7 +1753,7 @@ def addparttagsfnodescache(repo, bundler
     # consume little memory (1M heads is 40MB) b) we don't want to send the
     # part if we don't have entries and knowing if we have entries requires
     # cache lookups.
-    for node in outgoing.missingheads:
+    for node in outgoing.ancestorsof:
         # Don't compute missing, as this may slow down serving.
         fnode = cache.getfnode(node, computemissing=False)
         if fnode is not None:

@@ -1629,7 +1629,7 @@ def makestream(
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
-    heads = outgoing.missingheads
+    heads = outgoing.ancestorsof
     # We go through the fast path if we get told to, or if all (unfiltered
     # heads have been requested (since we then know there all linkrevs will
     # be pulled by the client).

@@ -93,20 +93,17 @@ class outgoing(object):
     excluded is the list of missing changeset that shouldn't be sent
     remotely.
 
-    missingheads is an alias to ancestorsof, but the name is wrong and it
-    will be removed
-
     Some members are computed on demand from the heads, unless provided upfront
     by discovery.'''
 
     def __init__(
-        self, repo, commonheads=None, missingheads=None, missingroots=None
+        self, repo, commonheads=None, ancestorsof=None, missingroots=None
     ):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
-        if missingheads is None:
-            missingheads = cl.heads()
+        if ancestorsof is None:
+            ancestorsof = cl.heads()
         if missingroots:
             discbases = []
             for n in missingroots:

@@ -114,14 +111,14 @@ class outgoing(object):
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
-            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
+            csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
             included = set(csets)
-            missingheads = heads
+            ancestorsof = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
             commonheads = [nullid]
         self.commonheads = commonheads
-        self.missingheads = missingheads
+        self.ancestorsof = ancestorsof
         self._revlog = cl
         self._common = None
         self._missing = None

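Taken together, the two hunks above simply rename the constructor keyword: callers now pass `ancestorsof` where they previously passed `missingheads`. A minimal sketch of the two call shapes that appear elsewhere in this patch (`bundlerepo`, `bundleroots`, `unknownhead`, `bases` and `heads` are placeholders taken from the surrounding hunks, not new API):

    from mercurial import discovery

    # Explicit common heads plus the heads whose ancestors should be bundled,
    # as in _rebundle() above:
    og = discovery.outgoing(
        bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
    )

    # Or derived from missing roots, as in changegroupsubset() further down:
    og = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
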
@@ -129,7 +126,7 @@ class outgoing(object):
 
     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(
-            self.commonheads, self.missingheads
+            self.commonheads, self.ancestorsof
         )
         self._common, self._missing = sets
 

@@ -146,8 +143,15 @@ class outgoing(object):
         return self._missing
 
     @property
-    def ancestorsof(self):
-        return self.missingheads
+    def missingheads(self):
+        util.nouideprecwarn(
+            b'outgoing.missingheads never contained what the name suggests and '
+            b'was renamed to outgoing.ancestorsof. check your code for '
+            b'correctness.',
+            b'5.5',
+            stacklevel=2,
+        )
+        return self.ancestorsof
 
 
 def findcommonoutgoing(

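The hunk above makes `ancestorsof` the real attribute and turns `missingheads` into a deprecated alias that warns before forwarding. A plain-Python sketch of the same shim pattern, using the standard `warnings` module instead of Mercurial's `util.nouideprecwarn`, purely for illustration:

    import warnings

    class Outgoing(object):
        def __init__(self, ancestorsof=None):
            self.ancestorsof = ancestorsof or []

        @property
        def missingheads(self):
            # Deprecated alias: warn at the caller's location, then forward
            # to the renamed attribute.
            warnings.warn(
                'missingheads was renamed to ancestorsof',
                DeprecationWarning,
                stacklevel=2,
            )
            return self.ancestorsof
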
@@ -163,7 +167,7 @@ def findcommonoutgoing(
     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.
 
-    If portable is given, compute more conservative common and missingheads,
+    If portable is given, compute more conservative common and ancestorsof,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)

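As the docstring notes, `commoninc` and `portable` are optional. A hedged sketch of how an extension might call this helper; the keyword names follow the parameters referenced in this function (`onlyheads`, `force`, `commoninc`, `portable`), and `other` is assumed to be a peer object obtained elsewhere (e.g. via `hg.peer()`):

    # Reusing a previous findcommonincoming() result via commoninc= would
    # avoid recomputing the common set here.
    og = discovery.findcommonoutgoing(
        repo, other, onlyheads=None, force=False, portable=True
    )
    repo.ui.write(b'%d outgoing changesets\n' % len(og.missing))
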
@@ -178,10 +182,10 @@ def findcommonoutgoing(
     # compute outgoing
     mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
     if not mayexclude:
-        og.missingheads = onlyheads or repo.heads()
+        og.ancestorsof = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
-        og.missingheads = repo.filtered(b"served").heads()
+        og.ancestorsof = repo.filtered(b"served").heads()
         og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff

@@ -196,12 +200,12 @@ def findcommonoutgoing(
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
-            missingheads = onlyheads
+            ancestorsof = onlyheads
         else:  # update missing heads
-            missingheads = phases.newheads(repo, onlyheads, excluded)
-        og.missingheads = missingheads
+            ancestorsof = phases.newheads(repo, onlyheads, excluded)
+        og.ancestorsof = ancestorsof
     if portable:
-        # recompute common and missingheads as if -r<rev> had been given for
+        # recompute common and ancestorsof as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()

@@ -209,7 +213,7 @@ def findcommonoutgoing(
         missingrevs = {cl.rev(n) for n in og._missing}
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
-        og.missingheads = [h for h in og.missingheads if h not in commonheads]
+        og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
 
     return og
 

@@ -282,7 +286,7 @@ def _headssummary(pushop):
     # If there are no obsstore, no post processing are needed.
     if repo.obsstore:
         torev = repo.changelog.rev
-        futureheads = {torev(h) for h in outgoing.missingheads}
+        futureheads = {torev(h) for h in outgoing.ancestorsof}
         futureheads |= {torev(h) for h in outgoing.commonheads}
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(pycompat.iteritems(headssum)):

@@ -503,7 +503,7 @@ class pushoperation(object):
     @util.propertycache
     def futureheads(self):
         """future remote heads if the changeset push succeeds"""
-        return self.outgoing.missingheads
+        return self.outgoing.ancestorsof
 
     @util.propertycache
     def fallbackheads(self):

@@ -512,20 +512,20 @@ class pushoperation(object):
             # not target to push, all common are relevant
             return self.outgoing.commonheads
         unfi = self.repo.unfiltered()
-        # I want cheads = heads(::missingheads and ::commonheads)
-        # (missingheads is revs with secret changeset filtered out)
+        # I want cheads = heads(::ancestorsof and ::commonheads)
+        # (ancestorsof is revs with secret changeset filtered out)
         #
         # This can be expressed as:
-        #     cheads = ( (missingheads and ::commonheads)
-        #              + (commonheads and ::missingheads))"
+        #     cheads = ( (ancestorsof and ::commonheads)
+        #              + (commonheads and ::ancestorsof))"
         #              )
         #
         # while trying to push we already computed the following:
         #     common = (::commonheads)
-        #     missing = ((commonheads::missingheads) - commonheads)
+        #     missing = ((commonheads::ancestorsof) - commonheads)
         #
         # We can pick:
-        # * missingheads part of common (::commonheads)
+        # * ancestorsof part of common (::commonheads)
         common = self.outgoing.common
         rev = self.repo.changelog.index.rev
         cheads = [node for node in self.revs if rev(node) in common]

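The comment block above reasons in revset terms. For illustration only (the method itself works on the precomputed `common`/`missing` sets to avoid an extra revset walk), the desired `cheads` could be spelled directly as a revset, assuming `ancestorsof` and `commonheads` are lists of binary node ids:

    # heads(::ancestorsof and ::commonheads), evaluated by the revset engine;
    # repo.revs() returns revision numbers, not nodes.
    cheads = repo.revs(b'heads(::%ln and ::%ln)', ancestorsof, commonheads)
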
@@ -918,7 +918,7 @@ def _pushcheckoutgoing(pushop):
             # obsolete or unstable changeset in missing, at
             # least one of the missinghead will be obsolete or
             # unstable. So checking heads only is ok
-            for node in outgoing.missingheads:
+            for node in outgoing.ancestorsof:
                 ctx = unfi[node]
                 if ctx.obsolete():
                     raise error.Abort(mso % ctx)

@@ -969,7 +969,7 @@ def _pushb2ctxcheckheads(pushop, bundler
     """
     # * 'force' do not check for push race,
     # * if we don't push anything, there are nothing to check.
-    if not pushop.force and pushop.outgoing.missingheads:
+    if not pushop.force and pushop.outgoing.ancestorsof:
         allowunrelated = b'related' in bundler.capabilities.get(
             b'checkheads', ()
         )

@@ -412,13 +412,13 @@ class locallegacypeer(localpeer):
 
     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(
-            self._repo, missingroots=nodes, missingheads=self._repo.heads()
+            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(
-            self._repo, missingroots=bases, missingheads=heads
+            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 

@@ -66,7 +66,7 @@ def backupbundle(
     else:
         bundletype = b"HG10UN"
 
-    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
+    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
     contentopts = {
         b'cg.version': cgversion,
         b'obsolescence': obsolescence,

@@ -162,7 +162,7 @@ class shelvedfile(object):
         repo = self.repo.unfiltered()
 
         outgoing = discovery.outgoing(
-            repo, missingroots=bases, missingheads=[node]
+            repo, missingroots=bases, ancestorsof=[node]
         )
         cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
 

@@ -339,7 +339,7 @@ def capabilities(repo, proto):
 def changegroup(repo, proto, roots):
     nodes = wireprototypes.decodelist(roots)
     outgoing = discovery.outgoing(
-        repo, missingroots=nodes, missingheads=repo.heads()
+        repo, missingroots=nodes, ancestorsof=repo.heads()
     )
     cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
     gen = iter(lambda: cg.read(32768), b'')

@@ -350,7 +350,7 @@ def changegroup(repo, proto, roots):
 def changegroupsubset(repo, proto, bases, heads):
     bases = wireprototypes.decodelist(bases)
     heads = wireprototypes.decodelist(heads)
-    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
+    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
     cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
     gen = iter(lambda: cg.read(32768), b'')
     return wireprototypes.streamres(gen=gen)