discovery: improve partial discovery documentation...
Boris Feld -
r41208:3dcc9658 default
@@ -1,360 +1,361
1 1 # setdiscovery.py - improved discovery of common nodeset for mercurial
2 2 #
3 3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """
9 9 The algorithm works in the following way. You have two repositories: local and
10 10 remote. They both contain a DAG of changelists.
11 11
12 12 The goal of the discovery protocol is to find one set of nodes, *common*,
13 13 the set of nodes shared by local and remote.
14 14
15 15 One of the issues with the original protocol was latency: it could
16 16 potentially require lots of roundtrips to discover that the local repo was a
17 17 subset of the remote (which is a very common case: you usually have few changes
18 18 compared to upstream, while upstream probably has lots of development).
19 19
20 20 The new protocol only requires one interface for the remote repo: `known()`,
21 21 which, given a set of changelists, tells you whether they are present in the DAG.
22 22
23 23 The algorithm then works as follows:
24 24
25 25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
26 26 all nodes are in `unknown`.
27 27 - Take a sample from `unknown`, call `remote.known(sample)`
28 28 - For each node that remote knows, move it and all its ancestors to `common`
29 29 - For each node that remote doesn't know, move it and all its descendants
30 30 to `missing`
31 31 - Iterate until `unknown` is empty
32 32
33 33 There are a couple of optimizations. The first is that instead of starting with
34 34 a random sample of missing, we start by sending all heads; in the case where the
35 35 local repo is a subset, you compute the answer in a single round trip.
36 36
37 37 Then you can do something similar to the bisecting strategy used when
38 38 finding faulty changesets. Instead of random samples, you can try picking
39 39 nodes that will maximize the number of nodes classified along with them
40 40 (since all their ancestors or descendants will be marked as well); a rough sketch of this loop follows the docstring.
41 41 """
42 42
43 43 from __future__ import absolute_import
44 44
45 45 import collections
46 46 import random
47 47
48 48 from .i18n import _
49 49 from .node import (
50 50 nullid,
51 51 nullrev,
52 52 )
53 53 from . import (
54 54 error,
55 55 util,
56 56 )
57 57
58 58 def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
59 59 """update an existing sample to match the expected size
60 60
61 61 The sample is updated with revs exponentially distant from each head of the
62 62 <revs> set. (H~1, H~2, H~4, H~8, etc).
63 63
64 64 If a target size is specified, the sampling will stop once this size is
65 65 reached. Otherwise sampling will happen until roots of the <revs> set are
66 66 reached.
67 67
68 68 :revs: set of revs we want to discover (if None, assume the whole dag)
69 69 :heads: set of DAG head revs
70 70 :sample: a sample to update
71 71 :parentfn: a callable to resolve parents for a revision
72 72 :quicksamplesize: optional target size of the sample"""
73 73 dist = {}
74 74 visit = collections.deque(heads)
75 75 seen = set()
76 76 factor = 1
77 77 while visit:
78 78 curr = visit.popleft()
79 79 if curr in seen:
80 80 continue
81 81 d = dist.setdefault(curr, 1)
82 82 if d > factor:
83 83 factor *= 2
84 84 if d == factor:
85 85 sample.add(curr)
86 86 if quicksamplesize and (len(sample) >= quicksamplesize):
87 87 return
88 88 seen.add(curr)
89 89
90 90 for p in parentfn(curr):
91 91 if p != nullrev and (not revs or p in revs):
92 92 dist.setdefault(p, d + 1)
93 93 visit.append(p)
94 94
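To make the exponential spacing (H~1, H~2, H~4, H~8, ...) of _updatesample() concrete, here is a toy run on a linear chain of sixteen revisions whose only head is rev 15; the parent function is an assumption made for illustration, and the example presumes Mercurial's own modules are importable:

    from mercurial.setdiscovery import _updatesample  # assuming mercurial is on the path

    nullrev = -1  # same sentinel value as mercurial.node.nullrev

    def toyparents(rev):
        # linear chain: each revision's sole parent is rev - 1; rev 0 is the root
        return [rev - 1] if rev > 0 else [nullrev]

    sample = set()
    _updatesample(None, [15], sample, toyparents)
    print(sorted(sample))  # [0, 8, 12, 14, 15]: distances 1, 2, 4, 8 and 16 from the head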
95 95 def _takequicksample(repo, headrevs, revs, size):
96 96 """takes a quick sample of size <size>
97 97
98 98 It is meant for initial sampling and focuses on querying heads and close
99 99 ancestors of heads.
100 100
101 101 :repo: the local repository
102 102 :headrevs: set of head revisions in local DAG to consider
103 103 :revs: set of revs to discover
104 104 :size: the maximum size of the sample"""
105 105 if len(revs) <= size:
106 106 return list(revs)
107 107 sample = set(repo.revs('heads(%ld)', revs))
108 108
109 109 if len(sample) >= size:
110 110 return _limitsample(sample, size)
111 111
112 112 _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
113 113 quicksamplesize=size)
114 114 return sample
115 115
116 116 def _takefullsample(repo, headrevs, revs, size):
117 117 if len(revs) <= size:
118 118 return list(revs)
119 119 sample = set(repo.revs('heads(%ld)', revs))
120 120
121 121 # update from heads
122 122 revsheads = set(repo.revs('heads(%ld)', revs))
123 123 _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)
124 124
125 125 # update from roots
126 126 revsroots = set(repo.revs('roots(%ld)', revs))
127 127
128 128 # _updatesample() essentially iterates over revisions to look up
129 129 # their children. This lookup is expensive and doing it in a loop is
130 130 # quadratic. We precompute the children for all relevant revisions and
131 131 # make the lookup in _updatesample() a simple dict lookup.
132 132 #
133 133 # Because this function can be called multiple times during discovery, we
134 134 # may still perform redundant work and there is room to optimize this by
135 135 # keeping a persistent cache of children across invocations.
136 136 children = {}
137 137
138 138 parentrevs = repo.changelog.parentrevs
139 139 for rev in repo.changelog.revs(start=min(revsroots)):
140 140 # Always ensure revision has an entry so we don't need to worry about
141 141 # missing keys.
142 142 children.setdefault(rev, [])
143 143
144 144 for prev in parentrevs(rev):
145 145 if prev == nullrev:
146 146 continue
147 147
148 148 children.setdefault(prev, []).append(rev)
149 149
150 150 _updatesample(revs, revsroots, sample, children.__getitem__)
151 151 assert sample
152 152 sample = _limitsample(sample, size)
153 153 if len(sample) < size:
154 154 more = size - len(sample)
155 155 sample.update(random.sample(list(revs - sample), more))
156 156 return sample
157 157
158 158 def _limitsample(sample, desiredlen):
159 159 """return a random subset of sample of at most desiredlen items"""
160 160 if len(sample) > desiredlen:
161 161 sample = set(random.sample(sample, desiredlen))
162 162 return sample
163 163
164 164 class partialdiscovery(object):
165 165 """an object representing ongoing discovery
166 166
167 167 Fed with data from the remote repository, this object keeps track of the
168 168 current set of changesets in various states:
169 169
170 - common: own nodes I know we both know
171 - undecided: own nodes where I don't know if remote knows them
172 - missing: own nodes I know remote lacks
170 - common: revs also known remotely
171 - undecided: revs we don't have information on yet
172 - missing: revs missing remotely
173 (all tracked revisions are known locally)
173 174 """
174 175
175 176 def __init__(self, repo, targetheads):
176 177 self._repo = repo
177 178 self._targetheads = targetheads
178 179 self._common = repo.changelog.incrementalmissingrevs()
179 180 self._undecided = None
180 181 self.missing = set()
181 182
182 183 def addcommons(self, commons):
183 184 """register nodes known as common"""
184 185 self._common.addbases(commons)
185 186 self._common.removeancestorsfrom(self.undecided)
186 187
187 188 def addmissings(self, missings):
188 189 """register some nodes as missing"""
189 190 if self.missing:
190 191 new = self._repo.revs('descendants(%ld) - descendants(%ld)',
191 192 missings, self.missing)
192 193 self.missing.update(new)
193 194 else:
194 195 self.missing.update(self._repo.revs('descendants(%ld)', missings))
195 196
196 197 self.undecided.difference_update(self.missing)
197 198
198 199 def addinfo(self, sample):
199 200 """consume an iterable of (rev, known) tuples"""
200 201 common = set()
201 202 missing = set()
202 203 for rev, known in sample:
203 204 if known:
204 205 common.add(rev)
205 206 else:
206 207 missing.add(rev)
207 208 if common:
208 209 self.addcommons(common)
209 210 if missing:
210 211 self.addmissings(missing)
211 212
212 213 def hasinfo(self):
213 214 """return True if we have any clue about the remote state"""
214 215 return self._common.hasbases()
215 216
216 217 def iscomplete(self):
217 218 """True if all the necessary data have been gathered"""
218 219 return self._undecided is not None and not self._undecided
219 220
220 221 @property
221 222 def undecided(self):
222 223 if self._undecided is not None:
223 224 return self._undecided
224 225 self._undecided = set(self._common.missingancestors(self._targetheads))
225 226 return self._undecided
226 227
227 228 def commonheads(self):
228 229 """the heads of the known common set"""
229 230 # heads(common) == heads(common.bases) since common represents
230 231 # common.bases and all its ancestors
231 232 # The presence of nullrev will confuse heads(). So filter it out.
232 233 return set(self._repo.revs('heads(%ld)',
233 234 self._common.bases - {nullrev}))
234 235
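To make the role of partialdiscovery concrete, a hypothetical driver could look like the sketch below; `pick_sample` and `remote_known` are placeholder callables, and the real loop is the one in findcommonheads() that follows:

    def drive(repo, ownheads, pick_sample, remote_known):
        # repo / ownheads: the local repository and its head revisions
        disco = partialdiscovery(repo, ownheads)
        while not disco.iscomplete():
            # pick_sample: placeholder choosing some undecided revs to query
            sample = list(pick_sample(disco.undecided))
            # remote_known: placeholder for the remote `known` command,
            # returning one boolean per queried rev
            yesno = remote_known(sample)
            disco.addinfo(zip(sample, yesno))
        return disco.commonheads()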
235 236 def findcommonheads(ui, local, remote,
236 237 initialsamplesize=100,
237 238 fullsamplesize=200,
238 239 abortwhenunrelated=True,
239 240 ancestorsof=None):
240 241 '''Return a tuple (common, anyincoming, remoteheads) used to identify
241 242 missing nodes from or in remote.
242 243 '''
243 244 start = util.timer()
244 245
245 246 roundtrips = 0
246 247 cl = local.changelog
247 248 clnode = cl.node
248 249 clrev = cl.rev
249 250
250 251 if ancestorsof is not None:
251 252 ownheads = [clrev(n) for n in ancestorsof]
252 253 else:
253 254 ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
254 255
255 256 # early exit if we know all the specified remote heads already
256 257 ui.debug("query 1; heads\n")
257 258 roundtrips += 1
258 259 sample = _limitsample(ownheads, initialsamplesize)
259 260 # indices between sample and externalized version must match
260 261 sample = list(sample)
261 262
262 263 with remote.commandexecutor() as e:
263 264 fheads = e.callcommand('heads', {})
264 265 fknown = e.callcommand('known', {
265 266 'nodes': [clnode(r) for r in sample],
266 267 })
267 268
268 269 srvheadhashes, yesno = fheads.result(), fknown.result()
269 270
270 271 if cl.tip() == nullid:
271 272 if srvheadhashes != [nullid]:
272 273 return [nullid], True, srvheadhashes
273 274 return [nullid], False, []
274 275
275 276 # start actual discovery (we note this before the next "if" for
276 277 # compatibility reasons)
277 278 ui.status(_("searching for changes\n"))
278 279
279 280 srvheads = []
280 281 for node in srvheadhashes:
281 282 if node == nullid:
282 283 continue
283 284
284 285 try:
285 286 srvheads.append(clrev(node))
286 287 # Catches unknown and filtered nodes.
287 288 except error.LookupError:
288 289 continue
289 290
290 291 if len(srvheads) == len(srvheadhashes):
291 292 ui.debug("all remote heads known locally\n")
292 293 return srvheadhashes, False, srvheadhashes
293 294
294 295 if len(sample) == len(ownheads) and all(yesno):
295 296 ui.note(_("all local heads known remotely\n"))
296 297 ownheadhashes = [clnode(r) for r in ownheads]
297 298 return ownheadhashes, True, srvheadhashes
298 299
299 300 # full blown discovery
300 301
301 302 disco = partialdiscovery(local, ownheads)
302 303 # treat remote heads (and maybe own heads) as a first implicit sample
303 304 # response
304 305 disco.addcommons(srvheads)
305 306 disco.addinfo(zip(sample, yesno))
306 307
307 308 full = False
308 309 progress = ui.makeprogress(_('searching'), unit=_('queries'))
309 310 while not disco.iscomplete():
310 311
311 312 if full or disco.hasinfo():
312 313 if full:
313 314 ui.note(_("sampling from both directions\n"))
314 315 else:
315 316 ui.debug("taking initial sample\n")
316 317 samplefunc = _takefullsample
317 318 targetsize = fullsamplesize
318 319 else:
319 320 # use even cheaper initial sample
320 321 ui.debug("taking quick initial sample\n")
321 322 samplefunc = _takequicksample
322 323 targetsize = initialsamplesize
323 324 sample = samplefunc(local, ownheads, disco.undecided, targetsize)
324 325
325 326 roundtrips += 1
326 327 progress.update(roundtrips)
327 328 ui.debug("query %i; still undecided: %i, sample size is: %i\n"
328 329 % (roundtrips, len(disco.undecided), len(sample)))
329 330 # indices between sample and externalized version must match
330 331 sample = list(sample)
331 332
332 333 with remote.commandexecutor() as e:
333 334 yesno = e.callcommand('known', {
334 335 'nodes': [clnode(r) for r in sample],
335 336 }).result()
336 337
337 338 full = True
338 339
339 340 disco.addinfo(zip(sample, yesno))
340 341
341 342 result = disco.commonheads()
342 343 elapsed = util.timer() - start
343 344 progress.complete()
344 345 ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
345 346 msg = ('found %d common and %d unknown server heads,'
346 347 ' %d roundtrips in %.4fs\n')
347 348 missing = set(result) - set(srvheads)
348 349 ui.log('discovery', msg, len(result), len(missing), roundtrips,
349 350 elapsed)
350 351
351 352 if not result and srvheadhashes != [nullid]:
352 353 if abortwhenunrelated:
353 354 raise error.Abort(_("repository is unrelated"))
354 355 else:
355 356 ui.warn(_("warning: repository is unrelated\n"))
356 357 return ({nullid}, True, srvheadhashes,)
357 358
358 359 anyincoming = (srvheadhashes != [nullid])
359 360 result = {clnode(r) for r in result}
360 361 return result, anyincoming, srvheadhashes
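For context, the returned triple would typically be consumed along the following lines; this is an illustrative sketch rather than the exact call site inside Mercurial, and `ui`, `local` and `remote` are assumed to be the usual ui, repository and peer objects:

    common, anyincoming, remoteheads = findcommonheads(ui, local, remote)
    if not anyincoming:
        ui.status("no changes found\n")
    else:
        # `common` holds node ids shared with the remote; anything on the remote
        # outside the ancestors of `common` is what a pull would need to fetch.
        ui.debug("%d common heads, %d remote heads\n"
                 % (len(common), len(remoteheads)))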