discovery: improve partial discovery documentation...
Boris Feld
r41208:3dcc9658 default
@@ -1,360 +1,361 @@
# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""
The algorithm works in the following way. You have two repositories: local
and remote. They both contain a DAG of changelists.

The goal of the discovery protocol is to find one set of nodes, *common*,
the set of nodes shared by local and remote.

One of the issues with the original protocol was latency: it could
potentially require lots of roundtrips to discover that the local repo was a
subset of remote (which is a very common case; you usually have few changes
compared to upstream, while upstream probably had lots of development).

The new protocol only requires one interface for the remote repo: `known()`,
which, given a set of changelists, tells you if they are present in the DAG.

The algorithm then works as follows:

- We will be using three sets, `common`, `missing`, `unknown`. Originally
  all nodes are in `unknown`.
- Take a sample from `unknown`, call `remote.known(sample)`
- For each node that remote knows, move it and all its ancestors to `common`
- For each node that remote doesn't know, move it and all its descendants
  to `missing`
- Iterate until `unknown` is empty

There are a couple of optimizations. First, instead of starting with a
random sample of missing, start by sending all heads; in the case where the
local repo is a subset, you have computed the answer in one round trip.

Second, you can do something similar to the bisecting strategy used when
finding faulty changesets: instead of random samples, you can try picking
nodes that will maximize the number of nodes that get classified with them
(since all ancestors or descendants will be marked as well).
"""

from __future__ import absolute_import

import collections
import random

from .i18n import _
from .node import (
    nullid,
    nullrev,
)
from . import (
    error,
    util,
)

def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
    """update an existing sample to match the expected size

    The sample is updated with revs exponentially distant from each head of the
    <revs> set. (H~1, H~2, H~4, H~8, etc).

    If a target size is specified, the sampling will stop once this size is
    reached. Otherwise sampling will happen until roots of the <revs> set are
    reached.

    :revs: set of revs we want to discover (if None, assume the whole dag)
    :heads: set of DAG head revs
    :sample: a sample to update
    :parentfn: a callable to resolve parents for a revision
    :quicksamplesize: optional target size of the sample"""
    dist = {}
    visit = collections.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and (len(sample) >= quicksamplesize):
                return
        seen.add(curr)

        for p in parentfn(curr):
            if p != nullrev and (not revs or p in revs):
                dist.setdefault(p, d + 1)
                visit.append(p)

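For intuition about the exponential spacing, here is a hedged sketch that runs `_updatesample()` (with this module's definitions in scope) over an invented linear chain of 100 revisions with rev 99 as the only head:

```python
# Toy run of _updatesample() on a linear chain 0 -> 1 -> ... -> 99.
# parentfn returns nullrev (-1, as in mercurial.node) at the root.
def parentfn(rev):
    return [rev - 1] if rev > 0 else [nullrev]

sample = set()
_updatesample(None, [99], sample, parentfn)
print(sorted(sample, reverse=True))
# expected: [99, 98, 96, 92, 84, 68, 36] -- the gap doubles moving away
# from the head, so a few probes cover a long stretch of history
```
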
def _takequicksample(repo, headrevs, revs, size):
    """takes a quick sample of size <size>

    It is meant for initial sampling and focuses on querying heads and close
    ancestors of heads.

    :repo: a local repository object
    :headrevs: set of head revisions in local DAG to consider
    :revs: set of revs to discover
    :size: the maximum size of the sample"""
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    if len(sample) >= size:
        return _limitsample(sample, size)

    _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
                  quicksamplesize=size)
    return sample

def _takefullsample(repo, headrevs, revs, size):
    """takes a sample of size <size>, sampling from both heads and roots

    Pads with random revisions if the structured sample is too small."""
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    # update from heads
    revsheads = set(repo.revs('heads(%ld)', revs))
    _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)

    # update from roots
    revsroots = set(repo.revs('roots(%ld)', revs))

    # _updatesample() essentially does iteration over revisions to look up
    # their children. This lookup is expensive and doing it in a loop is
    # quadratic. We precompute the children for all relevant revisions and
    # make the lookup in _updatesample() a simple dict lookup.
    #
    # Because this function can be called multiple times during discovery, we
    # may still perform redundant work and there is room to optimize this by
    # keeping a persistent cache of children across invocations.
    children = {}

    parentrevs = repo.changelog.parentrevs
    for rev in repo.changelog.revs(start=min(revsroots)):
        # Always ensure revision has an entry so we don't need to worry about
        # missing keys.
        children.setdefault(rev, [])

        for prev in parentrevs(rev):
            if prev == nullrev:
                continue

            children.setdefault(prev, []).append(rev)

    _updatesample(revs, revsroots, sample, children.__getitem__)
    assert sample
    sample = _limitsample(sample, size)
    if len(sample) < size:
        more = size - len(sample)
        sample.update(random.sample(list(revs - sample), more))
    return sample

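The children precomputation above can be shown standalone. A minimal sketch, assuming an invented four-revision history and a `parentrevs()`-style stub (in the real code, `parentrevs` comes from the changelog and `nullrev` is -1):

```python
# Invert a parentrevs()-style function into a rev -> [children] map,
# exactly as _takefullsample() does, so child lookups become O(1).
nullrev = -1
history = {0: (nullrev, nullrev), 1: (0, nullrev),
           2: (0, nullrev), 3: (1, 2)}       # rev -> (p1, p2), invented

def parentrevs(rev):
    return history[rev]

children = {}
for rev in sorted(history):
    children.setdefault(rev, [])             # ensure every rev has an entry
    for prev in parentrevs(rev):
        if prev == nullrev:
            continue
        children.setdefault(prev, []).append(rev)

print(children)   # {0: [1, 2], 1: [3], 2: [3], 3: []}
```
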
def _limitsample(sample, desiredlen):
    """return a random subset of sample of at most desiredlen items"""
    if len(sample) > desiredlen:
        sample = set(random.sample(sample, desiredlen))
    return sample

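For example (note that `random.sample()` accepting a set is Python 2 / pre-3.9 behavior, matching this module's era):

```python
# _limitsample() either returns the sample unchanged or a random subset.
print(_limitsample({1, 2, 3, 4, 5}, 3))   # a 3-element subset, e.g. {1, 3, 5}
print(_limitsample({1, 2}, 3))            # small enough: {1, 2} unchanged
```
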
class partialdiscovery(object):
    """an object representing ongoing discovery

    Fed with data from the remote repository, this object keeps track of
    the current set of changesets in various states:

-    - common: own nodes I know we both know
-    - undecided: own nodes where I don't know if remote knows them
-    - missing: own nodes I know remote lacks
+    - common: revs also known remotely
+    - undecided: revs we don't have information on yet
+    - missing: revs missing remotely
+      (all tracked revisions are known locally)
    """

    def __init__(self, repo, targetheads):
        self._repo = repo
        self._targetheads = targetheads
        self._common = repo.changelog.incrementalmissingrevs()
        self._undecided = None
        self.missing = set()

    def addcommons(self, commons):
        """register nodes known as common"""
        self._common.addbases(commons)
        self._common.removeancestorsfrom(self.undecided)

    def addmissings(self, missings):
        """register some nodes as missing"""
        if self.missing:
            new = self._repo.revs('descendants(%ld) - descendants(%ld)',
                                  missings, self.missing)
            self.missing.update(new)
        else:
            self.missing.update(self._repo.revs('descendants(%ld)', missings))

        self.undecided.difference_update(self.missing)

    def addinfo(self, sample):
        """consume an iterable of (rev, known) tuples"""
        common = set()
        missing = set()
        for rev, known in sample:
            if known:
                common.add(rev)
            else:
                missing.add(rev)
        if common:
            self.addcommons(common)
        if missing:
            self.addmissings(missing)

    def hasinfo(self):
        """return True if we have any clue about the remote state"""
        return self._common.hasbases()

    def iscomplete(self):
        """True if all the necessary data have been gathered"""
        return self._undecided is not None and not self._undecided

    @property
    def undecided(self):
        if self._undecided is not None:
            return self._undecided
        self._undecided = set(self._common.missingancestors(self._targetheads))
        return self._undecided

    def commonheads(self):
        """the heads of the known common set"""
        # heads(common) == heads(common.bases) since common represents
        # common.bases and all its ancestors
        # The presence of nullrev will confuse heads(). So filter it out.
        return set(self._repo.revs('heads(%ld)',
                                   self._common.bases - {nullrev}))

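A hedged sketch of how a driver loop feeds this object, mirroring the logic of `findcommonheads()` below. Here `local`, `ownheads` and `remote_known()` are placeholders, since the revset calls require a real repository and a real peer:

```python
# Hypothetical driver for partialdiscovery. local is a repository object,
# ownheads its head revisions, and remote_known() stands in for the peer's
# known() command, returning one boolean per sampled revision.
disco = partialdiscovery(local, ownheads)
while not disco.iscomplete():
    if disco.hasinfo():
        # later rounds: sample from both directions of the DAG
        sample = list(_takefullsample(local, ownheads, disco.undecided, 200))
    else:
        # first round: cheap sample of heads and near-head ancestors
        sample = list(_takequicksample(local, ownheads, disco.undecided, 100))
    yesno = remote_known(sample)
    disco.addinfo(zip(sample, yesno))
commonheads = disco.commonheads()
```
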
def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True,
                    ancestorsof=None):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.
    '''
    start = util.timer()

    roundtrips = 0
    cl = local.changelog
    clnode = cl.node
    clrev = cl.rev

    if ancestorsof is not None:
        ownheads = [clrev(n) for n in ancestorsof]
    else:
        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    sample = _limitsample(ownheads, initialsamplesize)
    # indices between sample and externalized version must match
    sample = list(sample)

    with remote.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {
            'nodes': [clnode(r) for r in sample],
        })

    srvheadhashes, yesno = fheads.result(), fknown.result()

    if cl.tip() == nullid:
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    srvheads = []
    for node in srvheadhashes:
        if node == nullid:
            continue

        try:
            srvheads.append(clrev(node))
        # Catches unknown and filtered nodes.
        except error.LookupError:
            continue

    if len(srvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return srvheadhashes, False, srvheadhashes

    if len(sample) == len(ownheads) and all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = [clnode(r) for r in ownheads]
        return ownheadhashes, True, srvheadhashes

    # full blown discovery

    disco = partialdiscovery(local, ownheads)
    # treat remote heads (and maybe own heads) as a first implicit sample
    # response
    disco.addcommons(srvheads)
    disco.addinfo(zip(sample, yesno))

    full = False
    progress = ui.makeprogress(_('searching'), unit=_('queries'))
    while not disco.iscomplete():

        if full or disco.hasinfo():
            if full:
                ui.note(_("sampling from both directions\n"))
            else:
                ui.debug("taking initial sample\n")
            samplefunc = _takefullsample
            targetsize = fullsamplesize
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            samplefunc = _takequicksample
            targetsize = initialsamplesize
        sample = samplefunc(local, ownheads, disco.undecided, targetsize)

        roundtrips += 1
        progress.update(roundtrips)
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(disco.undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)

        with remote.commandexecutor() as e:
            yesno = e.callcommand('known', {
                'nodes': [clnode(r) for r in sample],
            }).result()

        full = True

        disco.addinfo(zip(sample, yesno))

    result = disco.commonheads()
    elapsed = util.timer() - start
    progress.complete()
    ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
    msg = ('found %d common and %d unknown server heads,'
           ' %d roundtrips in %.4fs\n')
    missing = set(result) - set(srvheads)
    ui.log('discovery', msg, len(result), len(missing), roundtrips,
           elapsed)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise error.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
        return ({nullid}, True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    result = {clnode(r) for r in result}
    return result, anyincoming, srvheadhashes
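
Finally, a hypothetical call site, for instance during pull. The `repo` and `peer` names are assumptions standing in for real repository and peer objects; actual callers live in Mercurial's exchange machinery:

```python
# Hypothetical call site. repo is a local repository and peer a remote
# peer exposing commandexecutor(); both names are placeholders here.
common, anyincoming, remoteheads = findcommonheads(
    repo.ui, repo, peer, abortwhenunrelated=True)
if anyincoming:
    repo.ui.status("changes available from %d remote head(s)\n"
                   % len(remoteheads))
```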