discovery: move undecided set on the partialdiscovery...
Boris Feld
r41203:870a89c6 default
@@ -1,340 +1,348 @@
 # setdiscovery.py - improved discovery of common nodeset for mercurial
 #
 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 """
 The algorithm works in the following way. You have two repositories: local
 and remote. They both contain a DAG of changelists.

 The goal of the discovery protocol is to find one set of nodes, *common*,
 the set of nodes shared by local and remote.

 One of the issues with the original protocol was latency: it could
 potentially require lots of roundtrips to discover that the local repo was a
 subset of remote (which is a very common case; you usually have few changes
 compared to upstream, while upstream probably had lots of development).

 The new protocol only requires one interface for the remote repo: `known()`,
 which, given a set of changelists, tells you if they are present in the DAG.

 The algorithm then works as follows:

 - We will be using three sets, `common`, `missing`, `unknown`. Originally
   all nodes are in `unknown`.
 - Take a sample from `unknown`, call `remote.known(sample)`
 - For each node that remote knows, move it and all its ancestors to `common`
 - For each node that remote doesn't know, move it and all its descendants
   to `missing`
 - Iterate until `unknown` is empty

 There are a couple of optimizations. First, instead of starting with a random
 sample of missing, start by sending all heads; in the case where the local
 repo is a subset, you compute the answer in one round trip.

 Then you can do something similar to the bisecting strategy used when
 finding faulty changesets. Instead of random samples, you can try picking
 nodes that will maximize the number of nodes that will be
 classified with them (since all ancestors or descendants will be marked as well).
 """

 from __future__ import absolute_import

 import collections
 import random

 from .i18n import _
 from .node import (
     nullid,
     nullrev,
 )
 from . import (
     error,
     util,
 )

 def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
     """update an existing sample to match the expected size

     The sample is updated with revs exponentially distant from each head of the
     <revs> set. (H~1, H~2, H~4, H~8, etc).

     If a target size is specified, the sampling will stop once this size is
     reached. Otherwise sampling will happen until roots of the <revs> set are
     reached.

     :revs: set of revs we want to discover (if None, assume the whole dag)
     :heads: set of DAG head revs
     :sample: a sample to update
     :parentfn: a callable to resolve parents for a revision
     :quicksamplesize: optional target size of the sample"""
     dist = {}
     visit = collections.deque(heads)
     seen = set()
     factor = 1
     while visit:
         curr = visit.popleft()
         if curr in seen:
             continue
         d = dist.setdefault(curr, 1)
         if d > factor:
             factor *= 2
         if d == factor:
             sample.add(curr)
             if quicksamplesize and (len(sample) >= quicksamplesize):
                 return
         seen.add(curr)

         for p in parentfn(curr):
             if p != nullrev and (not revs or p in revs):
                 dist.setdefault(p, d + 1)
                 visit.append(p)

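As a rough illustration (assuming _updatesample is importable from mercurial.setdiscovery, an internal module whose API may change), running the walk over a linear chain picks the head and then revs at doubling distances behind it:

    from mercurial.setdiscovery import _updatesample  # internal API

    # A linear chain 0 <- 1 <- ... <- 10: rev - 1 is the only parent, and
    # rev 0's "parent" is -1, which the walk skips as nullrev.
    parentfn = lambda rev: [rev - 1]

    sample = set()
    _updatesample(None, [10], sample, parentfn)
    print(sorted(sample))  # [3, 7, 9, 10]: the head, then 1, 3, 7 steps back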
 def _takequicksample(repo, headrevs, revs, size):
     """takes a quick sample of size <size>

     It is meant for initial sampling and focuses on querying heads and close
     ancestors of heads.

     :repo: a local repository object
     :headrevs: set of head revisions in local DAG to consider
     :revs: set of revs to discover
     :size: the maximum size of the sample"""
     if len(revs) <= size:
         return list(revs)
     sample = set(repo.revs('heads(%ld)', revs))

     if len(sample) >= size:
         return _limitsample(sample, size)

     _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
                   quicksamplesize=size)
     return sample

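A hedged usage sketch (repo is assumed to be an open localrepo; this is internal API): when the undecided set is no larger than the requested size, the whole set comes back and no head/ancestor sampling happens at all:

    undecided = set(range(50))
    heads = [r for r in repo.changelog.headrevs() if r != -1]
    sample = _takequicksample(repo, heads, undecided, 100)
    assert set(sample) == undecided  # 50 <= 100: returned as-is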
 def _takefullsample(repo, headrevs, revs, size):
     if len(revs) <= size:
         return list(revs)
     sample = set(repo.revs('heads(%ld)', revs))

     # update from heads
     revsheads = set(repo.revs('heads(%ld)', revs))
     _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)

     # update from roots
     revsroots = set(repo.revs('roots(%ld)', revs))

     # _updatesample() essentially does iteration over revisions to look up
     # their children. This lookup is expensive and doing it in a loop is
     # quadratic. We precompute the children for all relevant revisions and
     # make the lookup in _updatesample() a simple dict lookup.
     #
     # Because this function can be called multiple times during discovery, we
     # may still perform redundant work and there is room to optimize this by
     # keeping a persistent cache of children across invocations.
     children = {}

     parentrevs = repo.changelog.parentrevs
     for rev in repo.changelog.revs(start=min(revsroots)):
         # Always ensure revision has an entry so we don't need to worry about
         # missing keys.
         children.setdefault(rev, [])

         for prev in parentrevs(rev):
             if prev == nullrev:
                 continue

             children.setdefault(prev, []).append(rev)

     _updatesample(revs, revsroots, sample, children.__getitem__)
     assert sample
     sample = _limitsample(sample, size)
     if len(sample) < size:
         more = size - len(sample)
         sample.update(random.sample(list(revs - sample), more))
     return sample

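The children-map precompute above is what keeps the roots-side walk linear. A stand-alone sketch of the same pattern, with a made-up parentrevs describing a linear DAG whose second parent is always unset:

    nullrev = -1
    parentrevs = lambda rev: [rev - 1, nullrev]

    children = {}
    for rev in range(0, 11):          # one linear pass over revs 0..10
        children.setdefault(rev, [])  # every rev gets an entry
        for prev in parentrevs(rev):
            if prev == nullrev:
                continue
            children.setdefault(prev, []).append(rev)

    print(children[4])  # [5]: child lookup is now a dict access, not a scan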
 def _limitsample(sample, desiredlen):
     """return a random subset of sample of at most desiredlen items"""
     if len(sample) > desiredlen:
         sample = set(random.sample(sample, desiredlen))
     return sample

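In effect _limitsample is just a size bound. For example (note that random.sample over a set was still accepted on the Pythons this code targeted; newer Pythons require a sequence):

    sample = set(range(1000))
    limited = _limitsample(sample, 200)
    assert len(limited) == 200 and limited <= sample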
 class partialdiscovery(object):
     """an object representing ongoing discovery

     Fed with data from the remote repository, this object keeps track of the
     current set of changesets in various states:

     - common: own nodes I know we both know
+    - undecided: own nodes where I don't know if remote knows them
     """

-    def __init__(self, repo):
+    def __init__(self, repo, targetheads):
         self._repo = repo
+        self._targetheads = targetheads
         self._common = repo.changelog.incrementalmissingrevs()
+        self._undecided = None

     def addcommons(self, commons):
         """register nodes known as common"""
         self._common.addbases(commons)

     def hasinfo(self):
         """return True if we have any clue about the remote state"""
         return self._common.hasbases()

+    @property
+    def undecided(self):
+        if self._undecided is not None:
+            return self._undecided
+        self._undecided = set(self._common.missingancestors(self._targetheads))
+        return self._undecided
+
     def commonheads(self):
         """the heads of the known common set"""
         # heads(common) == heads(common.bases) since common represents
         # common.bases and all its ancestors
         # The presence of nullrev will confuse heads(). So filter it out.
         return set(self._repo.revs('heads(%ld)',
                                    self._common.bases - {nullrev}))

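With this change the undecided set lives on the discovery object and is computed lazily on first attribute access, then cached and mutated in place by the caller. A hedged sketch of how the object is driven (repo is assumed to be an open localrepo, and confirmed_common a hypothetical set of revs the remote acknowledged):

    heads = [r for r in repo.changelog.headrevs() if r != -1]
    disco = partialdiscovery(repo, heads)

    disco.addcommons(confirmed_common)  # hypothetical remote answers
    # First access computes missingancestors(targetheads); later accesses
    # (and in-place mutations by the caller) reuse the cached set.
    if disco.undecided:
        print(len(disco.undecided), 'revs left to classify')
    print(disco.commonheads())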
 def findcommonheads(ui, local, remote,
                     initialsamplesize=100,
                     fullsamplesize=200,
                     abortwhenunrelated=True,
                     ancestorsof=None):
     '''Return a tuple (common, anyincoming, remoteheads) used to identify
     missing nodes from or in remote.
     '''
     start = util.timer()

     roundtrips = 0
     cl = local.changelog
     clnode = cl.node
     clrev = cl.rev

     if ancestorsof is not None:
         ownheads = [clrev(n) for n in ancestorsof]
     else:
         ownheads = [rev for rev in cl.headrevs() if rev != nullrev]

     # early exit if we know all the specified remote heads already
     ui.debug("query 1; heads\n")
     roundtrips += 1
     sample = _limitsample(ownheads, initialsamplesize)
     # indices between sample and externalized version must match
     sample = list(sample)

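The "indices must match" comment is about the wire protocol: known() returns a positional list of booleans, one per node sent, so the sample must be frozen into a stable list before being externalized. Illustratively, with made-up values:

    sample = [12, 40, 7]         # revs whose nodes get sent
    yesno = [True, False, True]  # remote's positional answers
    common = {n for i, n in enumerate(sample) if yesno[i]}       # {12, 7}
    missing = [n for i, n in enumerate(sample) if not yesno[i]]  # [40]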
     with remote.commandexecutor() as e:
         fheads = e.callcommand('heads', {})
         fknown = e.callcommand('known', {
             'nodes': [clnode(r) for r in sample],
         })

     srvheadhashes, yesno = fheads.result(), fknown.result()

     if cl.tip() == nullid:
         if srvheadhashes != [nullid]:
             return [nullid], True, srvheadhashes
         return [nullid], False, []

     # start actual discovery (we note this before the next "if" for
     # compatibility reasons)
     ui.status(_("searching for changes\n"))

     srvheads = []
     for node in srvheadhashes:
         if node == nullid:
             continue

         try:
             srvheads.append(clrev(node))
         # Catches unknown and filtered nodes.
         except error.LookupError:
             continue

     if len(srvheads) == len(srvheadhashes):
         ui.debug("all remote heads known locally\n")
         return srvheadhashes, False, srvheadhashes

     if len(sample) == len(ownheads) and all(yesno):
         ui.note(_("all local heads known remotely\n"))
         ownheadhashes = [clnode(r) for r in ownheads]
         return ownheadhashes, True, srvheadhashes

     # full blown discovery

-    disco = partialdiscovery(local)
+    disco = partialdiscovery(local, ownheads)
     # treat remote heads (and maybe own heads) as a first implicit sample
     # response
     disco.addcommons(srvheads)
     commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
     disco.addcommons(commoninsample)
-    # own nodes where I don't know if remote knows them
-    undecided = set(disco._common.missingancestors(ownheads))
     # own nodes I know remote lacks
     missing = set()

     full = False
     progress = ui.makeprogress(_('searching'), unit=_('queries'))
-    while undecided:
+    while disco.undecided:

         if sample:
             missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]

             if missing:
                 missing.update(local.revs('descendants(%ld) - descendants(%ld)',
                                           missinginsample, missing))
             else:
                 missing.update(local.revs('descendants(%ld)', missinginsample))

-            undecided.difference_update(missing)
+            disco.undecided.difference_update(missing)

-        if not undecided:
+        if not disco.undecided:
             break

         if full or disco.hasinfo():
             if full:
                 ui.note(_("sampling from both directions\n"))
             else:
                 ui.debug("taking initial sample\n")
             samplefunc = _takefullsample
             targetsize = fullsamplesize
         else:
             # use even cheaper initial sample
             ui.debug("taking quick initial sample\n")
             samplefunc = _takequicksample
             targetsize = initialsamplesize
-        sample = samplefunc(local, ownheads, undecided, targetsize)
+        sample = samplefunc(local, ownheads, disco.undecided, targetsize)

         roundtrips += 1
         progress.update(roundtrips)
         ui.debug("query %i; still undecided: %i, sample size is: %i\n"
-                 % (roundtrips, len(undecided), len(sample)))
+                 % (roundtrips, len(disco.undecided), len(sample)))
         # indices between sample and externalized version must match
         sample = list(sample)

         with remote.commandexecutor() as e:
             yesno = e.callcommand('known', {
                 'nodes': [clnode(r) for r in sample],
             }).result()

         full = True

         if sample:
             commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
             disco.addcommons(commoninsample)
-            disco._common.removeancestorsfrom(undecided)
+            disco._common.removeancestorsfrom(disco.undecided)

     result = disco.commonheads()
     elapsed = util.timer() - start
     progress.complete()
     ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
     msg = ('found %d common and %d unknown server heads,'
            ' %d roundtrips in %.4fs\n')
     missing = set(result) - set(srvheads)
     ui.log('discovery', msg, len(result), len(missing), roundtrips,
            elapsed)

     if not result and srvheadhashes != [nullid]:
         if abortwhenunrelated:
             raise error.Abort(_("repository is unrelated"))
         else:
             ui.warn(_("warning: repository is unrelated\n"))
         return ({nullid}, True, srvheadhashes,)

     anyincoming = (srvheadhashes != [nullid])
     result = {clnode(r) for r in result}
     return result, anyincoming, srvheadhashes
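Finally, a hedged sketch of how exchange code typically consumes this function (ui, local, and remote are assumed to be a ui object, an open localrepo, and a connected peer, respectively):

    common, anyincoming, remoteheads = findcommonheads(ui, local, remote)
    if not anyincoming:
        ui.status('no changes found\n')
    print(len(common), 'common heads;', len(remoteheads), 'remote heads')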