discovery: compute newly discovered missing in a more efficient way...
Boris Feld
r41316:f4277a35 default
@@ -1,359 +1,355 @@
# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""
The algorithm works in the following way. You have two repositories: local
and remote. They both contain a DAG of changesets.

The goal of the discovery protocol is to find one set of nodes, *common*,
the set of nodes shared by local and remote.

One of the issues with the original protocol was latency: it could
potentially require lots of roundtrips to discover that the local repo was a
subset of remote (which is a very common case; you usually have few changes
compared to upstream, while upstream probably has lots of development).

The new protocol only requires one interface for the remote repo: `known()`,
which, given a set of changesets, tells you whether they are present in the
DAG.

The algorithm then works as follows (a toy sketch follows this docstring):

- We will be using three sets, `common`, `missing`, `unknown`. Originally
  all nodes are in `unknown`.
- Take a sample from `unknown`, call `remote.known(sample)`
- For each node that remote knows, move it and all its ancestors to `common`
- For each node that remote doesn't know, move it and all its descendants
  to `missing`
- Iterate until `unknown` is empty

There are a couple of optimizations. First, instead of starting with a
random sample of missing, start by sending all heads; in the case where the
local repo is a subset, you compute the answer in one round trip.

Then you can do something similar to the bisecting strategy used when
finding faulty changesets. Instead of random samples, you can try picking
nodes that will maximize the number of nodes that will be classified with
them (since all ancestors or descendants will be marked as well).
"""

from __future__ import absolute_import

import collections
import random

from .i18n import _
from .node import (
    nullid,
    nullrev,
)
from . import (
    error,
    util,
)

def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
    """update an existing sample to match the expected size

    The sample is updated with revs exponentially distant from each head of the
    <revs> set. (H~1, H~2, H~4, H~8, etc).

    If a target size is specified, the sampling will stop once this size is
    reached. Otherwise sampling will happen until roots of the <revs> set are
    reached.

    :revs: set of revs we want to discover (if None, assume the whole dag)
    :heads: set of DAG head revs
    :sample: a sample to update
    :parentfn: a callable to resolve parents for a revision
    :quicksamplesize: optional target size of the sample"""
    dist = {}
    visit = collections.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and (len(sample) >= quicksamplesize):
                return
        seen.add(curr)

        for p in parentfn(curr):
            if p != nullrev and (not revs or p in revs):
                dist.setdefault(p, d + 1)
                visit.append(p)

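As a concrete illustration (editorial example, not module code): on a linear chain 0 <- 1 <- ... <- 9 with head 9, distances from the head grow 1, 2, 3, ... and a rev enters the sample each time the doubling `factor` is first matched, i.e. at distances 1, 2, 4 and 8:

    sample = set()
    _updatesample(None, [9], sample,
                  lambda r: [r - 1] if r > 0 else [nullrev])
    assert sample == {9, 8, 6, 2}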
def _takequicksample(repo, headrevs, revs, size):
    """takes a quick sample of size <size>

    It is meant for initial sampling and focuses on querying heads and close
    ancestors of heads.

    :repo: a local repository object
    :headrevs: set of head revisions in local DAG to consider
    :revs: set of revs to discover
    :size: the maximum size of the sample"""
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    if len(sample) >= size:
        return _limitsample(sample, size)

    _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
                  quicksamplesize=size)
    return sample

def _takefullsample(repo, headrevs, revs, size):
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    # update from heads
    revsheads = set(repo.revs('heads(%ld)', revs))
    _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)

    # update from roots
    revsroots = set(repo.revs('roots(%ld)', revs))

    # _updatesample() essentially does iteration over revisions to look up
    # their children. This lookup is expensive and doing it in a loop is
    # quadratic. We precompute the children for all relevant revisions and
    # make the lookup in _updatesample() a simple dict lookup.
    #
    # Because this function can be called multiple times during discovery, we
    # may still perform redundant work and there is room to optimize this by
    # keeping a persistent cache of children across invocations.
    children = {}

    parentrevs = repo.changelog.parentrevs
    for rev in repo.changelog.revs(start=min(revsroots)):
        # Always ensure revision has an entry so we don't need to worry about
        # missing keys.
        children.setdefault(rev, [])

        for prev in parentrevs(rev):
            if prev == nullrev:
                continue

            children.setdefault(prev, []).append(rev)

    _updatesample(revs, revsroots, sample, children.__getitem__)
    assert sample
    sample = _limitsample(sample, size)
    if len(sample) < size:
        more = size - len(sample)
        sample.update(random.sample(list(revs - sample), more))
    return sample

def _limitsample(sample, desiredlen):
    """return a random subset of sample of at most desiredlen items"""
    if len(sample) > desiredlen:
        sample = set(random.sample(sample, desiredlen))
    return sample

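Usage is straightforward (editorial examples):

    _limitsample({1, 2, 3, 4, 5}, 3)   # some random 3-element subset
    _limitsample({1, 2}, 5)            # returns {1, 2} unchanged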
class partialdiscovery(object):
    """an object representing ongoing discovery

    Fed with data from the remote repository, this object keeps track of the
    current set of changesets in various states:

    - common: revs also known remotely
    - undecided: revs we don't have information on yet
    - missing: revs missing remotely
    (all tracked revisions are known locally)
    """

    def __init__(self, repo, targetheads):
        self._repo = repo
        self._targetheads = targetheads
        self._common = repo.changelog.incrementalmissingrevs()
        self._undecided = None
        self.missing = set()

    def addcommons(self, commons):
        """register nodes known as common"""
        self._common.addbases(commons)
        self._common.removeancestorsfrom(self.undecided)

    def addmissings(self, missings):
        """register some nodes as missing"""
-        if self.missing:
-            new = self._repo.revs('descendants(%ld) - descendants(%ld)',
-                                  missings, self.missing)
-            self.missing.update(new)
-        else:
-            self.missing.update(self._repo.revs('descendants(%ld)', missings))
-
-        self.undecided.difference_update(self.missing)
+        newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
+        if newmissing:
+            self.missing.update(newmissing)
+            self.undecided.difference_update(newmissing)

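The revset `x::y` is the DAG range: revs that are descendants of `x` and ancestors of `y`, endpoints included. Every still-undecided descendant of a newly reported missing rev lies on such a path (to itself, at minimum), so restricting the walk to `undecided` classifies the same revs as the removed `descendants(%ld)` queries while only ever traversing the shrinking undecided set. For example (editorial illustration):

    # On a linear DAG 0 <- 1 <- 2 <- 3, where rev 1 was just reported
    # missing and {2, 3} are still undecided:
    #     repo.revs('%ld::%ld', [1], [2, 3])  ->  {1, 2, 3}
    # missing gains {1, 2, 3} and undecided becomes empty.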
    def addinfo(self, sample):
        """consume an iterable of (rev, known) tuples"""
        common = set()
        missing = set()
        for rev, known in sample:
            if known:
                common.add(rev)
            else:
                missing.add(rev)
        if common:
            self.addcommons(common)
        if missing:
            self.addmissings(missing)

    def hasinfo(self):
        """return True if we have any clue about the remote state"""
        return self._common.hasbases()

    def iscomplete(self):
        """True if all the necessary data have been gathered"""
        return self._undecided is not None and not self._undecided

    @property
    def undecided(self):
        if self._undecided is not None:
            return self._undecided
        self._undecided = set(self._common.missingancestors(self._targetheads))
        return self._undecided

    def commonheads(self):
        """the heads of the known common set"""
        # heads(common) == heads(common.bases) since common represents
        # common.bases and all its ancestors
        return self._common.basesheads()

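A minimal sketch of how the class is driven (the loop in findcommonheads below does this for real; `remoteknows` is a hypothetical callback returning whether the remote has a revision):

    disco = partialdiscovery(repo, localheadrevs)
    while not disco.iscomplete():
        sample = _takefullsample(repo, localheadrevs, disco.undecided,
                                 size=200)
        disco.addinfo((rev, remoteknows(rev)) for rev in sample)
    commonheads = disco.commonheads()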
def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True,
                    ancestorsof=None):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.
    '''
    start = util.timer()

    roundtrips = 0
    cl = local.changelog
    clnode = cl.node
    clrev = cl.rev

    if ancestorsof is not None:
        ownheads = [clrev(n) for n in ancestorsof]
    else:
        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    sample = _limitsample(ownheads, initialsamplesize)
    # indices between sample and externalized version must match
    sample = list(sample)

    with remote.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {
            'nodes': [clnode(r) for r in sample],
        })

    srvheadhashes, yesno = fheads.result(), fknown.result()

    if cl.tip() == nullid:
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    srvheads = []
    for node in srvheadhashes:
        if node == nullid:
            continue

        try:
            srvheads.append(clrev(node))
        # Catches unknown and filtered nodes.
        except error.LookupError:
            continue

    if len(srvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return srvheadhashes, False, srvheadhashes

    if len(sample) == len(ownheads) and all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = [clnode(r) for r in ownheads]
        return ownheadhashes, True, srvheadhashes

    # full blown discovery

    disco = partialdiscovery(local, ownheads)
    # treat remote heads (and maybe own heads) as a first implicit sample
    # response
    disco.addcommons(srvheads)
    disco.addinfo(zip(sample, yesno))

    full = False
    progress = ui.makeprogress(_('searching'), unit=_('queries'))
    while not disco.iscomplete():

        if full or disco.hasinfo():
            if full:
                ui.note(_("sampling from both directions\n"))
            else:
                ui.debug("taking initial sample\n")
            samplefunc = _takefullsample
            targetsize = fullsamplesize
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            samplefunc = _takequicksample
            targetsize = initialsamplesize
        sample = samplefunc(local, ownheads, disco.undecided, targetsize)

        roundtrips += 1
        progress.update(roundtrips)
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(disco.undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)

        with remote.commandexecutor() as e:
            yesno = e.callcommand('known', {
                'nodes': [clnode(r) for r in sample],
            }).result()

        full = True

        disco.addinfo(zip(sample, yesno))

    result = disco.commonheads()
    elapsed = util.timer() - start
    progress.complete()
    ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
    msg = ('found %d common and %d unknown server heads,'
           ' %d roundtrips in %.4fs\n')
    missing = set(result) - set(srvheads)
    ui.log('discovery', msg, len(result), len(missing), roundtrips,
           elapsed)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise error.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
        return ({nullid}, True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    result = {clnode(r) for r in result}
    return result, anyincoming, srvheadhashes
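Callers use the returned triple to decide whether anything needs to be pulled (editorial sketch; `requestmissing` is hypothetical):

    common, anyincoming, srvheads = findcommonheads(ui, repo, remote)
    if anyincoming:
        # the remote has changesets we lack; `common` is the boundary
        # from which to request them
        requestmissing(repo, remote, common)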