discovery: cache the children mapping used during each discovery...
marmoute - r42051:5baf06d2 default
@@ -1,370 +1,370 @@
# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
8 """
8 """
9 Algorithm works in the following way. You have two repository: local and
9 Algorithm works in the following way. You have two repository: local and
10 remote. They both contains a DAG of changelists.
10 remote. They both contains a DAG of changelists.
11
11
12 The goal of the discovery protocol is to find one set of node *common*,
12 The goal of the discovery protocol is to find one set of node *common*,
13 the set of nodes shared by local and remote.
13 the set of nodes shared by local and remote.
14
14
15 One of the issue with the original protocol was latency, it could
15 One of the issue with the original protocol was latency, it could
16 potentially require lots of roundtrips to discover that the local repo was a
16 potentially require lots of roundtrips to discover that the local repo was a
17 subset of remote (which is a very common case, you usually have few changes
17 subset of remote (which is a very common case, you usually have few changes
18 compared to upstream, while upstream probably had lots of development).
18 compared to upstream, while upstream probably had lots of development).
19
19
20 The new protocol only requires one interface for the remote repo: `known()`,
20 The new protocol only requires one interface for the remote repo: `known()`,
21 which given a set of changelists tells you if they are present in the DAG.
21 which given a set of changelists tells you if they are present in the DAG.
22
22
23 The algorithm then works as follow:
23 The algorithm then works as follow:
24
24
25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
26 all nodes are in `unknown`.
26 all nodes are in `unknown`.
27 - Take a sample from `unknown`, call `remote.known(sample)`
27 - Take a sample from `unknown`, call `remote.known(sample)`
28 - For each node that remote knows, move it and all its ancestors to `common`
28 - For each node that remote knows, move it and all its ancestors to `common`
29 - For each node that remote doesn't know, move it and all its descendants
29 - For each node that remote doesn't know, move it and all its descendants
30 to `missing`
30 to `missing`
31 - Iterate until `unknown` is empty
31 - Iterate until `unknown` is empty
32
32
33 There are a couple optimizations, first is instead of starting with a random
33 There are a couple optimizations, first is instead of starting with a random
34 sample of missing, start by sending all heads, in the case where the local
34 sample of missing, start by sending all heads, in the case where the local
35 repo is a subset, you computed the answer in one round trip.
35 repo is a subset, you computed the answer in one round trip.
36
36
37 Then you can do something similar to the bisecting strategy used when
37 Then you can do something similar to the bisecting strategy used when
38 finding faulty changesets. Instead of random samples, you can try picking
38 finding faulty changesets. Instead of random samples, you can try picking
39 nodes that will maximize the number of nodes that will be
39 nodes that will maximize the number of nodes that will be
40 classified with it (since all ancestors or descendants will be marked as well).
40 classified with it (since all ancestors or descendants will be marked as well).
41 """
41 """

from __future__ import absolute_import

import collections
import random

from .i18n import _
from .node import (
    nullid,
    nullrev,
)
from . import (
    error,
    util,
)

def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
    """update an existing sample to match the expected size

    The sample is updated with revs exponentially distant from each head of the
    <revs> set. (H~1, H~2, H~4, H~8, etc).

    If a target size is specified, the sampling will stop once this size is
    reached. Otherwise sampling will happen until roots of the <revs> set are
    reached.

    :revs: set of revs we want to discover (if None, assume the whole dag)
    :heads: set of DAG head revs
    :sample: a sample to update
    :parentfn: a callable to resolve parents for a revision
    :quicksamplesize: optional target size of the sample"""
    dist = {}
    visit = collections.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and (len(sample) >= quicksamplesize):
                return
        seen.add(curr)

        for p in parentfn(curr):
            if p != nullrev and (not revs or p in revs):
                dist.setdefault(p, d + 1)
                visit.append(p)
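
# For example, on a linear history 0..9 with single head 9, the loop above
# keeps the revs whose distance from the head is a power of two, so a
# doctest-style sketch (using this module's nullrev) would give:
#
#     >>> sample = set()
#     >>> _updatesample(None, [9], sample,
#     ...               lambda r: [r - 1] if r > 0 else [nullrev])
#     >>> sorted(sample)
#     [2, 6, 8, 9]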

def _limitsample(sample, desiredlen):
    """return a random subset of sample of at most desiredlen items"""
    if len(sample) > desiredlen:
        sample = set(random.sample(sample, desiredlen))
    return sample
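
# For instance, a ten-rev set is trimmed down to the requested size:
#
#     >>> len(_limitsample(set(range(10)), 4))
#     4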

class partialdiscovery(object):
    """an object representing ongoing discovery

    Fed with data from the remote repository, this object keeps track of the
    current set of changesets in various states:

    - common: revs also known remotely
    - undecided: revs we don't have information on yet
    - missing: revs missing remotely
    (all tracked revisions are known locally)
    """

    def __init__(self, repo, targetheads):
        self._repo = repo
        self._targetheads = targetheads
        self._common = repo.changelog.incrementalmissingrevs()
        self._undecided = None
        self.missing = set()
+        self._childrenmap = None

    def addcommons(self, commons):
        """register nodes known as common"""
        self._common.addbases(commons)
        if self._undecided is not None:
            self._common.removeancestorsfrom(self._undecided)

    def addmissings(self, missings):
        """register some nodes as missing"""
        newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
        if newmissing:
            self.missing.update(newmissing)
            self.undecided.difference_update(newmissing)

    def addinfo(self, sample):
        """consume an iterable of (rev, known) tuples"""
        common = set()
        missing = set()
        for rev, known in sample:
            if known:
                common.add(rev)
            else:
                missing.add(rev)
        if common:
            self.addcommons(common)
        if missing:
            self.addmissings(missing)

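    # A typical driver (see findcommonheads() below) feeds one round of remote
    # answers at a time, where `sample` is the list of queried revs and
    # `yesno` the remote's boolean replies:
    #
    #     disco = partialdiscovery(repo, ownheads)
    #     disco.addinfo(zip(sample, yesno))  # one (rev, known) pair per rev
    #     if disco.iscomplete():
    #         heads = disco.commonheads()
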
    def hasinfo(self):
        """return True if we have any clue about the remote state"""
        return self._common.hasbases()

    def iscomplete(self):
        """True if all the necessary data have been gathered"""
        return self._undecided is not None and not self._undecided

    @property
    def undecided(self):
        if self._undecided is not None:
            return self._undecided
        self._undecided = set(self._common.missingancestors(self._targetheads))
        return self._undecided

    def commonheads(self):
        """the heads of the known common set"""
        # heads(common) == heads(common.bases) since common represents
        # common.bases and all its ancestors
        return self._common.basesheads()

    def _parentsgetter(self):
        getrev = self._repo.changelog.index.__getitem__
        def getparents(r):
            # fields 5 and 6 of a changelog index entry are the two parent
            # revisions
            return getrev(r)[5:7]
        return getparents

    def _childrengetter(self, revs):

+        if self._childrenmap is not None:
+            return self._childrenmap.__getitem__
+
        # _updatesample() essentially does iteration over revisions to look
        # up their children. This lookup is expensive and doing it in a loop is
        # quadratic. We precompute the children for all relevant revisions and
        # make the lookup in _updatesample() a simple dict lookup.
-        #
-        # Because this function can be called multiple times during discovery,
-        # we may still perform redundant work and there is room to optimize
-        # this by keeping a persistent cache of children across invocations.
-        children = {}
+        self._childrenmap = children = {}

        parentrevs = self._parentsgetter()

        for rev in sorted(revs):
            # Always ensure revision has an entry so we don't need to worry
            # about missing keys.
            children[rev] = []
            for prev in parentrevs(rev):
                if prev == nullrev:
                    continue
                c = children.get(prev)
                if c is not None:
                    c.append(rev)
        return children.__getitem__

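    # Reusing the cached map across calls is safe because the undecided set
    # only shrinks during discovery: later calls look up a subset of the revs
    # the map was built for, and _updatesample() filters out children that
    # are no longer in the set being sampled.
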
    def takequicksample(self, headrevs, size):
        """takes a quick sample of size <size>

        It is meant for initial sampling and focuses on querying heads and close
        ancestors of heads.

        :headrevs: set of head revisions in local DAG to consider
        :size: the maximum size of the sample"""
        revs = self.undecided
        if len(revs) <= size:
            return list(revs)
        sample = set(self._repo.revs('heads(%ld)', revs))

        if len(sample) >= size:
            return _limitsample(sample, size)

        _updatesample(None, headrevs, sample, self._parentsgetter(),
                      quicksamplesize=size)
        return sample

    def takefullsample(self, headrevs, size):
        """takes a sample of size <size> spread across the undecided set

        Heads and roots of the undecided set are sampled first, so each
        answer can classify revisions from both directions of the graph."""
        revs = self.undecided
        if len(revs) <= size:
            return list(revs)
        repo = self._repo
        sample = set(repo.revs('heads(%ld)', revs))
        parentrevs = self._parentsgetter()

        # update from heads
        revsheads = sample.copy()
        _updatesample(revs, revsheads, sample, parentrevs)

        # update from roots
        revsroots = set(repo.revs('roots(%ld)', revs))

        childrenrevs = self._childrengetter(revs)

        _updatesample(revs, revsroots, sample, childrenrevs)
        assert sample
        sample = _limitsample(sample, size)
        if len(sample) < size:
            more = size - len(sample)
            sample.update(random.sample(list(revs - sample), more))
        return sample

def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True,
                    ancestorsof=None):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.
    '''
    start = util.timer()

    roundtrips = 0
    cl = local.changelog
    clnode = cl.node
    clrev = cl.rev

    if ancestorsof is not None:
        ownheads = [clrev(n) for n in ancestorsof]
    else:
        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    sample = _limitsample(ownheads, initialsamplesize)
    # indices between sample and externalized version must match
    sample = list(sample)

    with remote.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {
            'nodes': [clnode(r) for r in sample],
        })

    srvheadhashes, yesno = fheads.result(), fknown.result()

    if cl.tip() == nullid:
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    knownsrvheads = []  # revnos of remote heads that are known locally
    for node in srvheadhashes:
        if node == nullid:
            continue

        try:
            knownsrvheads.append(clrev(node))
        # Catches unknown and filtered nodes.
        except error.LookupError:
            continue

    if len(knownsrvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return srvheadhashes, False, srvheadhashes

    if len(sample) == len(ownheads) and all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = [clnode(r) for r in ownheads]
        return ownheadhashes, True, srvheadhashes

    # full blown discovery

    disco = partialdiscovery(local, ownheads)
    # treat remote heads (and maybe own heads) as a first implicit sample
    # response
    disco.addcommons(knownsrvheads)
    disco.addinfo(zip(sample, yesno))

    full = False
    progress = ui.makeprogress(_('searching'), unit=_('queries'))
    while not disco.iscomplete():

        if full or disco.hasinfo():
            if full:
                ui.note(_("sampling from both directions\n"))
            else:
                ui.debug("taking initial sample\n")
            samplefunc = disco.takefullsample
            targetsize = fullsamplesize
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            samplefunc = disco.takequicksample
            targetsize = initialsamplesize
        sample = samplefunc(ownheads, targetsize)

        roundtrips += 1
        progress.update(roundtrips)
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(disco.undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)

        with remote.commandexecutor() as e:
            yesno = e.callcommand('known', {
                'nodes': [clnode(r) for r in sample],
            }).result()

        full = True

        disco.addinfo(zip(sample, yesno))

    result = disco.commonheads()
    elapsed = util.timer() - start
    progress.complete()
    ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
    msg = ('found %d common and %d unknown server heads,'
           ' %d roundtrips in %.4fs\n')
    missing = set(result) - set(knownsrvheads)
    ui.log('discovery', msg, len(result), len(missing), roundtrips,
           elapsed)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise error.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
        return ({nullid}, True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    result = {clnode(r) for r in result}
    return result, anyincoming, srvheadhashes
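
# A minimal sketch of a call site (hypothetical names; the real callers live
# in mercurial.discovery and pass in a ui, the local repo and a remote peer):
#
#     common, anyincoming, remoteheads = findcommonheads(
#         repo.ui, repo, remotepeer, abortwhenunrelated=False)
#     if anyincoming:
#         ...  # the remote has changesets we do not know about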