discovery: introduce a partialdiscovery object...
Boris Feld
r41147:3023bc4b default
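The changeset below moves discovery bookkeeping that previously lived in local variables of findcommonheads into a dedicated partialdiscovery object with a small API: addcommons() registers revisions known to be common, and hasinfo() asks whether anything is known about the remote yet. As a rough, hedged sketch only, the stand-in class below mimics that shape with plain sets and an explicit parent map; the names toypartialdiscovery, parents, and commonancestors are invented for this illustration, while the real class instead wraps repo.changelog.incrementalmissingrevs().

class toypartialdiscovery(object):
    """toy stand-in for the partialdiscovery object added in this changeset"""

    def __init__(self, parents):
        # parents: dict mapping each rev to a list of its parent revs (a tiny DAG)
        self._parents = parents
        self._commonbases = set()

    def addcommons(self, commons):
        """register revs known to be common (mirrors partialdiscovery.addcommons)"""
        self._commonbases.update(commons)

    def hasinfo(self):
        """return True if we have learned anything about the remote state"""
        return bool(self._commonbases)

    def commonancestors(self):
        """every rev that is a known-common base or an ancestor of one"""
        seen = set()
        stack = list(self._commonbases)
        while stack:
            rev = stack.pop()
            if rev in seen:
                continue
            seen.add(rev)
            stack.extend(self._parents.get(rev, []))
        return seen

For example, with parents = {0: [], 1: [0], 2: [1]}, calling addcommons([1]) makes hasinfo() return True and commonancestors() return {0, 1}; the real object obtains the same ancestor closure lazily through incrementalmissingrevs().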
@@ -1,314 +1,336 @@
# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""
The algorithm works in the following way. You have two repositories: local
and remote. They both contain a DAG of changelists.

The goal of the discovery protocol is to find one set of nodes, *common*,
the set of nodes shared by local and remote.

One of the issues with the original protocol was latency: it could
potentially require lots of roundtrips to discover that the local repo was a
subset of remote (which is a very common case; you usually have few changes
compared to upstream, while upstream probably had lots of development).

The new protocol only requires one interface for the remote repo: `known()`,
which given a set of changelists tells you if they are present in the DAG.

The algorithm then works as follows:

- We will be using three sets, `common`, `missing`, `unknown`. Originally
  all nodes are in `unknown`.
- Take a sample from `unknown`, call `remote.known(sample)`
- For each node that remote knows, move it and all its ancestors to `common`
- For each node that remote doesn't know, move it and all its descendants
  to `missing`
- Iterate until `unknown` is empty

There are a couple of optimizations. First, instead of starting with a random
sample of missing, start by sending all heads; in the case where the local
repo is a subset, you compute the answer in one round trip.

Then you can do something similar to the bisecting strategy used when
finding faulty changesets. Instead of random samples, you can try picking
nodes that will maximize the number of nodes that will be
classified with them (since all ancestors or descendants will be marked as well).
"""

from __future__ import absolute_import

import collections
import random

from .i18n import _
from .node import (
    nullid,
    nullrev,
)
from . import (
    error,
    util,
)

def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
    """update an existing sample to match the expected size

    The sample is updated with revs exponentially distant from each head of the
    <revs> set. (H~1, H~2, H~4, H~8, etc).

    If a target size is specified, the sampling will stop once this size is
    reached. Otherwise sampling will happen until roots of the <revs> set are
    reached.

    :revs:  set of revs we want to discover (if None, assume the whole dag)
    :heads: set of DAG head revs
    :sample: a sample to update
    :parentfn: a callable to resolve parents for a revision
    :quicksamplesize: optional target size of the sample"""
    dist = {}
    visit = collections.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and (len(sample) >= quicksamplesize):
                return
        seen.add(curr)

        for p in parentfn(curr):
            if p != nullrev and (not revs or p in revs):
                dist.setdefault(p, d + 1)
                visit.append(p)

def _takequicksample(repo, headrevs, revs, size):
    """takes a quick sample of size <size>

    It is meant for initial sampling and focuses on querying heads and close
    ancestors of heads.

    :repo: a local repository object
    :headrevs: set of head revisions in local DAG to consider
    :revs: set of revs to discover
    :size: the maximum size of the sample"""
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    if len(sample) >= size:
        return _limitsample(sample, size)

    _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
                  quicksamplesize=size)
    return sample

def _takefullsample(repo, headrevs, revs, size):
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    # update from heads
    revsheads = set(repo.revs('heads(%ld)', revs))
    _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)

    # update from roots
    revsroots = set(repo.revs('roots(%ld)', revs))

    # _updatesample() essentially does iteration over revisions to look up
    # their children. This lookup is expensive and doing it in a loop is
    # quadratic. We precompute the children for all relevant revisions and
    # make the lookup in _updatesample() a simple dict lookup.
    #
    # Because this function can be called multiple times during discovery, we
    # may still perform redundant work and there is room to optimize this by
    # keeping a persistent cache of children across invocations.
    children = {}

    parentrevs = repo.changelog.parentrevs
    for rev in repo.changelog.revs(start=min(revsroots)):
        # Always ensure revision has an entry so we don't need to worry about
        # missing keys.
        children.setdefault(rev, [])

        for prev in parentrevs(rev):
            if prev == nullrev:
                continue

            children.setdefault(prev, []).append(rev)

    _updatesample(revs, revsroots, sample, children.__getitem__)
    assert sample
    sample = _limitsample(sample, size)
    if len(sample) <= size:
        more = size - len(sample)
        sample.update(random.sample(list(revs - sample), more))
    return sample

def _limitsample(sample, desiredlen):
    """return a random subset of sample of at most desiredlen items"""
    if len(sample) > desiredlen:
        sample = set(random.sample(sample, desiredlen))
    return sample

+class partialdiscovery(object):
+    """an object representing ongoing discovery
+
+    Fed with data from the remote repository, this object keeps track of the
+    current set of changesets in various states:
+
+    - common: own nodes I know we both know
+    """
+
+    def __init__(self, repo):
+        self._repo = repo
+        self._common = repo.changelog.incrementalmissingrevs()
+
+    def addcommons(self, commons):
+        """register nodes known as common"""
+        self._common.addbases(commons)
+
+    def hasinfo(self):
+        """return True if we have any clue about the remote state"""
+        return self._common.hasbases()
+
+
def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True,
                    ancestorsof=None):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.
    '''
    start = util.timer()

    roundtrips = 0
    cl = local.changelog
    clnode = cl.node
    clrev = cl.rev

    if ancestorsof is not None:
        ownheads = [clrev(n) for n in ancestorsof]
    else:
        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    sample = _limitsample(ownheads, initialsamplesize)
    # indices between sample and externalized version must match
    sample = list(sample)

    with remote.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {
            'nodes': [clnode(r) for r in sample],
        })

    srvheadhashes, yesno = fheads.result(), fknown.result()

    if cl.tip() == nullid:
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    srvheads = []
    for node in srvheadhashes:
        if node == nullid:
            continue

        try:
            srvheads.append(clrev(node))
        # Catches unknown and filtered nodes.
        except error.LookupError:
            continue

    if len(srvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return srvheadhashes, False, srvheadhashes

    if len(sample) == len(ownheads) and all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = [clnode(r) for r in ownheads]
        return ownheadhashes, True, srvheadhashes

    # full blown discovery

-    # own nodes I know we both know
+    disco = partialdiscovery(local)
    # treat remote heads (and maybe own heads) as a first implicit sample
    # response
-    common = cl.incrementalmissingrevs(srvheads)
+    disco.addcommons(srvheads)
    commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
-    common.addbases(commoninsample)
+    disco.addcommons(commoninsample)
    # own nodes where I don't know if remote knows them
-    undecided = set(common.missingancestors(ownheads))
+    undecided = set(disco._common.missingancestors(ownheads))
    # own nodes I know remote lacks
    missing = set()

    full = False
    progress = ui.makeprogress(_('searching'), unit=_('queries'))
    while undecided:

        if sample:
            missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]

            if missing:
                missing.update(local.revs('descendants(%ld) - descendants(%ld)',
                                          missinginsample, missing))
            else:
                missing.update(local.revs('descendants(%ld)', missinginsample))

            undecided.difference_update(missing)

        if not undecided:
            break

-        if full or common.hasbases():
+        if full or disco.hasinfo():
            if full:
                ui.note(_("sampling from both directions\n"))
            else:
                ui.debug("taking initial sample\n")
            samplefunc = _takefullsample
            targetsize = fullsamplesize
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            samplefunc = _takequicksample
            targetsize = initialsamplesize
        sample = samplefunc(local, ownheads, undecided, targetsize)

        roundtrips += 1
        progress.update(roundtrips)
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)

        with remote.commandexecutor() as e:
            yesno = e.callcommand('known', {
                'nodes': [clnode(r) for r in sample],
            }).result()

        full = True

        if sample:
            commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
-            common.addbases(commoninsample)
+            disco.addcommons(commoninsample)
-            common.removeancestorsfrom(undecided)
+            disco._common.removeancestorsfrom(undecided)

    # heads(common) == heads(common.bases) since common represents common.bases
    # and all its ancestors
    # The presence of nullrev will confuse heads(). So filter it out.
-    result = set(local.revs('heads(%ld)', common.bases - {nullrev}))
+    result = set(local.revs('heads(%ld)', disco._common.bases - {nullrev}))
    elapsed = util.timer() - start
    progress.complete()
    ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
    msg = ('found %d common and %d unknown server heads,'
           ' %d roundtrips in %.4fs\n')
    missing = set(result) - set(srvheads)
    ui.log('discovery', msg, len(result), len(missing), roundtrips,
           elapsed)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise error.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
        return ({nullid}, True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    result = {clnode(r) for r in result}
    return result, anyincoming, srvheadhashes
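
The module docstring above describes the overall protocol: keep a set of undecided revs, sample it, ask the remote's known(), and let each answer classify whole ancestor or descendant cones at once. A deliberately simplified, self-contained sketch of that loop follows; toydiscovery, _closure, parents, children and remoteknown are invented names for this illustration and do not correspond to functions in setdiscovery.py, which instead uses revsets and the sampling helpers shown in the diff.

import random

def _closure(revs, neighbours):
    """return revs plus everything reachable through the neighbours() callable"""
    seen = set()
    stack = list(revs)
    while stack:
        rev = stack.pop()
        if rev in seen:
            continue
        seen.add(rev)
        stack.extend(neighbours(rev))
    return seen

def toydiscovery(parents, children, localrevs, remoteknown, samplesize=2):
    """classify localrevs as common or missing using only a remote known() oracle"""
    common, missing = set(), set()
    undecided = set(localrevs)
    while undecided:
        sample = random.sample(sorted(undecided), min(samplesize, len(undecided)))
        yesno = remoteknown(sample)
        for rev, known in zip(sample, yesno):
            if known:
                # a known rev drags all of its ancestors into common
                common |= _closure([rev], lambda r: parents.get(r, []))
            else:
                # an unknown rev drags all of its descendants into missing
                missing |= _closure([rev], lambda r: children.get(r, []))
        undecided -= common | missing
    return common, missing

For a linear history parents = {0: [], 1: [0], 2: [1], 3: [2]}, children = {0: [1], 1: [2], 2: [3]}, and a remote that only has revs 0 and 1 (remoteknown = lambda sample: [r <= 1 for r in sample]), toydiscovery(parents, children, {0, 1, 2, 3}, remoteknown) returns ({0, 1}, {2, 3}) after a couple of rounds.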