discovery: rename `srvheads` to `knownsrvheads`...
Georges Racinet
r42044:82884bbf default
@@ -1,356 +1,356 @@
1 1 # setdiscovery.py - improved discovery of common nodeset for mercurial
2 2 #
3 3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """
9 9 The algorithm works in the following way. You have two repositories: local and
10 10 remote. They both contain a DAG of changelists.
11 11
12 12 The goal of the discovery protocol is to find one set of nodes, *common*,
13 13 the set of nodes shared by local and remote.
14 14
15 15 One of the issues with the original protocol was latency: it could
16 16 potentially require lots of roundtrips to discover that the local repo was a
17 17 subset of remote (which is a very common case, you usually have few changes
18 18 compared to upstream, while upstream probably had lots of development).
19 19
20 20 The new protocol only requires one interface for the remote repo: `known()`,
21 21 which given a set of changelists tells you if they are present in the DAG.
22 22
23 23 The algorithm then works as follows:
24 24
25 25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
26 26 all nodes are in `unknown`.
27 27 - Take a sample from `unknown`, call `remote.known(sample)`
28 28 - For each node that remote knows, move it and all its ancestors to `common`
29 29 - For each node that remote doesn't know, move it and all its descendants
30 30 to `missing`
31 31 - Iterate until `unknown` is empty
32 32
33 33 There are a couple of optimizations. First, instead of starting with a random
34 34 sample of missing, start by sending all heads; in the case where the local
35 35 repo is a subset, you compute the answer in one round trip.
36 36
37 37 Then you can do something similar to the bisecting strategy used when
38 38 finding faulty changesets. Instead of random samples, you can try picking
39 39 nodes that will maximize the number of nodes that will be
40 40 classified with them (since all ancestors or descendants will be marked as well).
41 41 """
42 42
43 43 from __future__ import absolute_import
44 44
45 45 import collections
46 46 import random
47 47
48 48 from .i18n import _
49 49 from .node import (
50 50 nullid,
51 51 nullrev,
52 52 )
53 53 from . import (
54 54 error,
55 55 util,
56 56 )
57 57
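# Editorial sketch (not part of this changeset): a toy version of the
# classification loop described in the module docstring, run against a plain
# dict-based DAG and a fake remote.  The names `_toy_discovery`, `parents`,
# `children` and `remote_known` are invented for illustration only.
def _toy_discovery(parents, children, remote_known):
    """classify every node as common or missing using only remote_known()"""
    undecided = set(parents)
    common, missing = set(), set()

    def closure(start, edges):
        # walk the DAG from `start` along `edges` (parent links or child links)
        seen, stack = set(), [start]
        while stack:
            node = stack.pop()
            if node in seen:
                continue
            seen.add(node)
            stack.extend(edges.get(node, ()))
        return seen

    while undecided:
        sample = [min(undecided)]  # the real code picks samples much more cleverly
        for node, known in zip(sample, remote_known(sample)):
            if known:
                common |= closure(node, parents)    # the node and its ancestors
            else:
                missing |= closure(node, children)  # the node and its descendants
        undecided -= common | missing
    return common, missing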
58 58 def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
59 59 """update an existing sample to match the expected size
60 60
61 61 The sample is updated with revs exponentially distant from each head of the
62 62 <revs> set. (H~1, H~2, H~4, H~8, etc).
63 63
64 64 If a target size is specified, the sampling will stop once this size is
65 65 reached. Otherwise sampling will happen until roots of the <revs> set are
66 66 reached.
67 67
68 68 :revs: set of revs we want to discover (if None, assume the whole dag)
69 69 :heads: set of DAG head revs
70 70 :sample: a sample to update
71 71 :parentfn: a callable to resolve parents for a revision
72 72 :quicksamplesize: optional target size of the sample"""
73 73 dist = {}
74 74 visit = collections.deque(heads)
75 75 seen = set()
76 76 factor = 1
77 77 while visit:
78 78 curr = visit.popleft()
79 79 if curr in seen:
80 80 continue
81 81 d = dist.setdefault(curr, 1)
82 82 if d > factor:
83 83 factor *= 2
84 84 if d == factor:
85 85 sample.add(curr)
86 86 if quicksamplesize and (len(sample) >= quicksamplesize):
87 87 return
88 88 seen.add(curr)
89 89
90 90 for p in parentfn(curr):
91 91 if p != nullrev and (not revs or p in revs):
92 92 dist.setdefault(p, d + 1)
93 93 visit.append(p)
94 94
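# Editorial sketch (not part of this changeset): what the exponential sampling
# in _updatesample() yields on a plain linear history 0..9 where rev N has
# parent N-1.  Revs at distance 1, 2, 4 and 8 from the head are kept, so the
# resulting sample is {9, 8, 6, 2}.  `_linearparents` is an invented helper.
def _linearparents(rev):
    return [rev - 1] if rev > 0 else [nullrev]

# Example usage:
#   sample = set()
#   _updatesample(None, [9], sample, _linearparents)
#   assert sample == {9, 8, 6, 2}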
95 95 def _takequicksample(repo, headrevs, revs, size):
96 96 """takes a quick sample of size <size>
97 97
98 98 It is meant for initial sampling and focuses on querying heads and close
99 99 ancestors of heads.
100 100
101 101 :repo: a local repository object
102 102 :headrevs: set of head revisions in local DAG to consider
103 103 :revs: set of revs to discover
104 104 :size: the maximum size of the sample"""
105 105 if len(revs) <= size:
106 106 return list(revs)
107 107 sample = set(repo.revs('heads(%ld)', revs))
108 108
109 109 if len(sample) >= size:
110 110 return _limitsample(sample, size)
111 111
112 112 _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
113 113 quicksamplesize=size)
114 114 return sample
115 115
116 116 def _takefullsample(repo, headrevs, revs, size):
117 117 if len(revs) <= size:
118 118 return list(revs)
119 119 sample = set(repo.revs('heads(%ld)', revs))
120 120
121 121 # update from heads
122 122 revsheads = set(repo.revs('heads(%ld)', revs))
123 123 _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)
124 124
125 125 # update from roots
126 126 revsroots = set(repo.revs('roots(%ld)', revs))
127 127
128 128 # _updatesample() essentially does iteration over revisions to look up
129 129 # their children. This lookup is expensive and doing it in a loop is
130 130 # quadratic. We precompute the children for all relevant revisions and
131 131 # make the lookup in _updatesample() a simple dict lookup.
132 132 #
133 133 # Because this function can be called multiple times during discovery, we
134 134 # may still perform redundant work and there is room to optimize this by
135 135 # keeping a persistent cache of children across invocations.
136 136 children = {}
137 137
138 138 parentrevs = repo.changelog.parentrevs
139 139 for rev in repo.changelog.revs(start=min(revsroots)):
140 140 # Always ensure revision has an entry so we don't need to worry about
141 141 # missing keys.
142 142 children.setdefault(rev, [])
143 143
144 144 for prev in parentrevs(rev):
145 145 if prev == nullrev:
146 146 continue
147 147
148 148 children.setdefault(prev, []).append(rev)
149 149
150 150 _updatesample(revs, revsroots, sample, children.__getitem__)
151 151 assert sample
152 152 sample = _limitsample(sample, size)
153 153 if len(sample) < size:
154 154 more = size - len(sample)
155 155 sample.update(random.sample(list(revs - sample), more))
156 156 return sample
157 157
158 158 def _limitsample(sample, desiredlen):
159 159 """return a random subset of sample of at most desiredlen item"""
160 160 if len(sample) > desiredlen:
161 161 sample = set(random.sample(sample, desiredlen))
162 162 return sample
163 163
164 164 class partialdiscovery(object):
165 165 """an object representing ongoing discovery
166 166
167 167 Fed with data from the remote repository, this object keeps track of the
168 168 current set of changesets in various states:
169 169
170 170 - common: revs also known remotely
171 171 - undecided: revs we don't have information on yet
172 172 - missing: revs missing remotely
173 173 (all tracked revisions are known locally)
174 174 """
175 175
176 176 def __init__(self, repo, targetheads):
177 177 self._repo = repo
178 178 self._targetheads = targetheads
179 179 self._common = repo.changelog.incrementalmissingrevs()
180 180 self._undecided = None
181 181 self.missing = set()
182 182
183 183 def addcommons(self, commons):
184 184 """registrer nodes known as common"""
185 185 self._common.addbases(commons)
186 186 if self._undecided is not None:
187 187 self._common.removeancestorsfrom(self._undecided)
188 188
189 189 def addmissings(self, missings):
190 190 """registrer some nodes as missing"""
191 191 newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
192 192 if newmissing:
193 193 self.missing.update(newmissing)
194 194 self.undecided.difference_update(newmissing)
195 195
196 196 def addinfo(self, sample):
197 197 """consume an iterable of (rev, known) tuples"""
198 198 common = set()
199 199 missing = set()
200 200 for rev, known in sample:
201 201 if known:
202 202 common.add(rev)
203 203 else:
204 204 missing.add(rev)
205 205 if common:
206 206 self.addcommons(common)
207 207 if missing:
208 208 self.addmissings(missing)
209 209
210 210 def hasinfo(self):
211 211 """return True is we have any clue about the remote state"""
212 212 return self._common.hasbases()
213 213
214 214 def iscomplete(self):
215 215 """True if all the necessary data have been gathered"""
216 216 return self._undecided is not None and not self._undecided
217 217
218 218 @property
219 219 def undecided(self):
220 220 if self._undecided is not None:
221 221 return self._undecided
222 222 self._undecided = set(self._common.missingancestors(self._targetheads))
223 223 return self._undecided
224 224
225 225 def commonheads(self):
226 226 """the heads of the known common set"""
227 227 # heads(common) == heads(common.bases) since common represents
228 228 # common.bases and all its ancestors
229 229 return self._common.basesheads()
230 230
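# Editorial sketch (not part of this changeset): how partialdiscovery and a
# sampling helper combine into one discovery round.  `samplefn` and
# `remoteknown` are invented stand-ins for _takefullsample/_takequicksample
# and the remote `known` command.
def _toyround(repo, disco, ownheads, samplefn, remoteknown, size=200):
    """run one sample/known/addinfo round and report whether discovery is done"""
    sample = list(samplefn(repo, ownheads, disco.undecided, size))
    yesno = remoteknown([repo.changelog.node(r) for r in sample])
    disco.addinfo(zip(sample, yesno))
    return disco.iscomplete()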
231 231 def findcommonheads(ui, local, remote,
232 232 initialsamplesize=100,
233 233 fullsamplesize=200,
234 234 abortwhenunrelated=True,
235 235 ancestorsof=None):
236 236 '''Return a tuple (common, anyincoming, remoteheads) used to identify
237 237 missing nodes from or in remote.
238 238 '''
239 239 start = util.timer()
240 240
241 241 roundtrips = 0
242 242 cl = local.changelog
243 243 clnode = cl.node
244 244 clrev = cl.rev
245 245
246 246 if ancestorsof is not None:
247 247 ownheads = [clrev(n) for n in ancestorsof]
248 248 else:
249 249 ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
250 250
251 251 # early exit if we know all the specified remote heads already
252 252 ui.debug("query 1; heads\n")
253 253 roundtrips += 1
254 254 sample = _limitsample(ownheads, initialsamplesize)
255 255 # indices between sample and externalized version must match
256 256 sample = list(sample)
257 257
258 258 with remote.commandexecutor() as e:
259 259 fheads = e.callcommand('heads', {})
260 260 fknown = e.callcommand('known', {
261 261 'nodes': [clnode(r) for r in sample],
262 262 })
263 263
264 264 srvheadhashes, yesno = fheads.result(), fknown.result()
265 265
266 266 if cl.tip() == nullid:
267 267 if srvheadhashes != [nullid]:
268 268 return [nullid], True, srvheadhashes
269 269 return [nullid], False, []
270 270
271 271 # start actual discovery (we note this before the next "if" for
272 272 # compatibility reasons)
273 273 ui.status(_("searching for changes\n"))
274 274
275 - srvheads = []
275 + knownsrvheads = [] # revnos of remote heads that are known locally
276 276 for node in srvheadhashes:
277 277 if node == nullid:
278 278 continue
279 279
280 280 try:
281 - srvheads.append(clrev(node))
281 + knownsrvheads.append(clrev(node))
282 282 # Catches unknown and filtered nodes.
283 283 except error.LookupError:
284 284 continue
285 285
286 - if len(srvheads) == len(srvheadhashes):
286 + if len(knownsrvheads) == len(srvheadhashes):
287 287 ui.debug("all remote heads known locally\n")
288 288 return srvheadhashes, False, srvheadhashes
289 289
290 290 if len(sample) == len(ownheads) and all(yesno):
291 291 ui.note(_("all local heads known remotely\n"))
292 292 ownheadhashes = [clnode(r) for r in ownheads]
293 293 return ownheadhashes, True, srvheadhashes
294 294
295 295 # full blown discovery
296 296
297 297 disco = partialdiscovery(local, ownheads)
298 298 # treat remote heads (and maybe own heads) as a first implicit sample
299 299 # response
300 - disco.addcommons(srvheads)
300 + disco.addcommons(knownsrvheads)
301 301 disco.addinfo(zip(sample, yesno))
302 302
303 303 full = False
304 304 progress = ui.makeprogress(_('searching'), unit=_('queries'))
305 305 while not disco.iscomplete():
306 306
307 307 if full or disco.hasinfo():
308 308 if full:
309 309 ui.note(_("sampling from both directions\n"))
310 310 else:
311 311 ui.debug("taking initial sample\n")
312 312 samplefunc = _takefullsample
313 313 targetsize = fullsamplesize
314 314 else:
315 315 # use even cheaper initial sample
316 316 ui.debug("taking quick initial sample\n")
317 317 samplefunc = _takequicksample
318 318 targetsize = initialsamplesize
319 319 sample = samplefunc(local, ownheads, disco.undecided, targetsize)
320 320
321 321 roundtrips += 1
322 322 progress.update(roundtrips)
323 323 ui.debug("query %i; still undecided: %i, sample size is: %i\n"
324 324 % (roundtrips, len(disco.undecided), len(sample)))
325 325 # indices between sample and externalized version must match
326 326 sample = list(sample)
327 327
328 328 with remote.commandexecutor() as e:
329 329 yesno = e.callcommand('known', {
330 330 'nodes': [clnode(r) for r in sample],
331 331 }).result()
332 332
333 333 full = True
334 334
335 335 disco.addinfo(zip(sample, yesno))
336 336
337 337 result = disco.commonheads()
338 338 elapsed = util.timer() - start
339 339 progress.complete()
340 340 ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
341 341 msg = ('found %d common and %d unknown server heads,'
342 342 ' %d roundtrips in %.4fs\n')
343 - missing = set(result) - set(srvheads)
343 + missing = set(result) - set(knownsrvheads)
344 344 ui.log('discovery', msg, len(result), len(missing), roundtrips,
345 345 elapsed)
346 346
347 347 if not result and srvheadhashes != [nullid]:
348 348 if abortwhenunrelated:
349 349 raise error.Abort(_("repository is unrelated"))
350 350 else:
351 351 ui.warn(_("warning: repository is unrelated\n"))
352 352 return ({nullid}, True, srvheadhashes,)
353 353
354 354 anyincoming = (srvheadhashes != [nullid])
355 355 result = {clnode(r) for r in result}
356 356 return result, anyincoming, srvheadhashes
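# Editorial sketch (not part of this changeset): a typical call site for
# findcommonheads.  `peer` stands in for a remote peer object; inside Mercurial
# the usual caller is mercurial.discovery.findcommonincoming().
#
#   common, anyincoming, remoteheads = findcommonheads(ui, localrepo, peer)
#   # `common`: nodes known on both sides; `anyincoming`: whether the remote
#   # has changesets we lack; `remoteheads`: the remote head hashes.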