@@ -1,270 +1,271 @@
 # setdiscovery.py - improved discovery of common nodeset for mercurial
 #
 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
8 | """ |
|
8 | """ | |
9 | Algorithm works in the following way. You have two repository: local and |
|
9 | Algorithm works in the following way. You have two repository: local and | |
10 | remote. They both contains a DAG of changelists. |
|
10 | remote. They both contains a DAG of changelists. | |
11 |
|
11 | |||
12 | The goal of the discovery protocol is to find one set of node *common*, |
|
12 | The goal of the discovery protocol is to find one set of node *common*, | |
13 | the set of nodes shared by local and remote. |
|
13 | the set of nodes shared by local and remote. | |
14 |
|
14 | |||
15 | One of the issue with the original protocol was latency, it could |
|
15 | One of the issue with the original protocol was latency, it could | |
16 | potentially require lots of roundtrips to discover that the local repo was a |
|
16 | potentially require lots of roundtrips to discover that the local repo was a | |
17 | subset of remote (which is a very common case, you usually have few changes |
|
17 | subset of remote (which is a very common case, you usually have few changes | |
18 | compared to upstream, while upstream probably had lots of development). |
|
18 | compared to upstream, while upstream probably had lots of development). | |
19 |
|
19 | |||
20 | The new protocol only requires one interface for the remote repo: `known()`, |
|
20 | The new protocol only requires one interface for the remote repo: `known()`, | |
21 | which given a set of changelists tells you if they are present in the DAG. |
|
21 | which given a set of changelists tells you if they are present in the DAG. | |
22 |
|
22 | |||
23 | The algorithm then works as follow: |
|
23 | The algorithm then works as follow: | |
24 |
|
24 | |||
25 | - We will be using three sets, `common`, `missing`, `unknown`. Originally |
|
25 | - We will be using three sets, `common`, `missing`, `unknown`. Originally | |
26 | all nodes are in `unknown`. |
|
26 | all nodes are in `unknown`. | |
27 | - Take a sample from `unknown`, call `remote.known(sample)` |
|
27 | - Take a sample from `unknown`, call `remote.known(sample)` | |
28 | - For each node that remote knows, move it and all its ancestors to `common` |
|
28 | - For each node that remote knows, move it and all its ancestors to `common` | |
29 | - For each node that remote doesn't know, move it and all its descendants |
|
29 | - For each node that remote doesn't know, move it and all its descendants | |
30 | to `missing` |
|
30 | to `missing` | |
31 | - Iterate until `unknown` is empty |
|
31 | - Iterate until `unknown` is empty | |
32 |
|
32 | |||
33 | There are a couple optimizations, first is instead of starting with a random |
|
33 | There are a couple optimizations, first is instead of starting with a random | |
34 | sample of missing, start by sending all heads, in the case where the local |
|
34 | sample of missing, start by sending all heads, in the case where the local | |
35 | repo is a subset, you computed the answer in one round trip. |
|
35 | repo is a subset, you computed the answer in one round trip. | |
36 |
|
36 | |||
37 | Then you can do something similar to the bisecting strategy used when |
|
37 | Then you can do something similar to the bisecting strategy used when | |
38 | finding faulty changesets. Instead of random samples, you can try picking |
|
38 | finding faulty changesets. Instead of random samples, you can try picking | |
39 | nodes that will maximize the number of nodes that will be |
|
39 | nodes that will maximize the number of nodes that will be | |
40 | classified with it (since all ancestors or descendants will be marked as well). |
|
40 | classified with it (since all ancestors or descendants will be marked as well). | |
41 | """ |
|
41 | """ | |

 from __future__ import absolute_import

 import collections
 import random

 from .i18n import _
 from .node import (
     nullid,
     nullrev,
 )
 from . import (
     dagutil,
     error,
     util,
 )

 def _updatesample(dag, nodes, sample, quicksamplesize=0):
     """update an existing sample to match the expected size

     The sample is updated with nodes exponentially distant from each head of the
     <nodes> set. (H~1, H~2, H~4, H~8, etc).

     If a target size is specified, the sampling will stop once this size is
     reached. Otherwise sampling will happen until roots of the <nodes> set are
     reached.

     :dag: a dag object from dagutil
     :nodes: set of nodes we want to discover (if None, assume the whole dag)
     :sample: a sample to update
     :quicksamplesize: optional target size of the sample"""
     # if nodes is empty we scan the entire graph
     if nodes:
         heads = dag.headsetofconnecteds(nodes)
     else:
         heads = dag.heads()
     dist = {}
     visit = collections.deque(heads)
     seen = set()
     factor = 1
     while visit:
         curr = visit.popleft()
         if curr in seen:
             continue
         d = dist.setdefault(curr, 1)
         if d > factor:
             factor *= 2
         if d == factor:
             sample.add(curr)
             if quicksamplesize and (len(sample) >= quicksamplesize):
                 return
         seen.add(curr)
         for p in dag.parents(curr):
             if not nodes or p in nodes:
                 dist.setdefault(p, d + 1)
                 visit.append(p)

 def _takequicksample(dag, nodes, size):
     """takes a quick sample of size <size>

     It is meant for initial sampling and focuses on querying heads and close
     ancestors of heads.

     :dag: a dag object
     :nodes: set of nodes to discover
     :size: the maximum size of the sample"""
     sample = dag.headsetofconnecteds(nodes)
     if len(sample) >= size:
         return _limitsample(sample, size)
     _updatesample(dag, None, sample, quicksamplesize=size)
     return sample

 def _takefullsample(dag, nodes, size):
     sample = dag.headsetofconnecteds(nodes)
     # update from heads
     _updatesample(dag, nodes, sample)
     # update from roots
     _updatesample(dag.inverse(), nodes, sample)
     assert sample
     sample = _limitsample(sample, size)
     if len(sample) < size:
         more = size - len(sample)
         sample.update(random.sample(list(nodes - sample), more))
     return sample

 def _limitsample(sample, desiredlen):
     """return a random subset of sample of at most desiredlen item"""
     if len(sample) > desiredlen:
         sample = set(random.sample(sample, desiredlen))
     return sample

 def findcommonheads(ui, local, remote,
                     initialsamplesize=100,
                     fullsamplesize=200,
                     abortwhenunrelated=True,
                     ancestorsof=None):
     '''Return a tuple (common, anyincoming, remoteheads) used to identify
     missing nodes from or in remote.
     '''
     start = util.timer()

     roundtrips = 0
     cl = local.changelog
     localsubset = None
     if ancestorsof is not None:
         rev = local.changelog.rev
         localsubset = [rev(n) for n in ancestorsof]
     dag = dagutil.revlogdag(cl, localsubset=localsubset)

     # early exit if we know all the specified remote heads already
     ui.debug("query 1; heads\n")
     roundtrips += 1
     ownheads = dag.heads()
     sample = _limitsample(ownheads, initialsamplesize)
     # indices between sample and externalized version must match
     sample = list(sample)

     with remote.commandexecutor() as e:
         fheads = e.callcommand('heads', {})
         fknown = e.callcommand('known', {
             'nodes': dag.externalizeall(sample),
         })

     srvheadhashes, yesno = fheads.result(), fknown.result()

     if cl.tip() == nullid:
         if srvheadhashes != [nullid]:
             return [nullid], True, srvheadhashes
         return [nullid], False, []

     # start actual discovery (we note this before the next "if" for
     # compatibility reasons)
     ui.status(_("searching for changes\n"))

     srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
     if len(srvheads) == len(srvheadhashes):
         ui.debug("all remote heads known locally\n")
         return (srvheadhashes, False, srvheadhashes,)

     if len(sample) == len(ownheads) and all(yesno):
         ui.note(_("all local heads known remotely\n"))
         ownheadhashes = dag.externalizeall(ownheads)
         return (ownheadhashes, True, srvheadhashes,)

     # full blown discovery

     # own nodes I know we both know
     # treat remote heads (and maybe own heads) as a first implicit sample
     # response
     common = cl.incrementalmissingrevs(srvheads)
     commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
     common.addbases(commoninsample)
     # own nodes where I don't know if remote knows them
     undecided = set(common.missingancestors(ownheads))
     # own nodes I know remote lacks
     missing = set()

     full = False
+    progress = ui.makeprogress(_('searching'), unit=_('queries'))
     while undecided:

         if sample:
             missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
             missing.update(dag.descendantset(missinginsample, missing))

         undecided.difference_update(missing)

         if not undecided:
             break

         if full or common.hasbases():
             if full:
                 ui.note(_("sampling from both directions\n"))
             else:
                 ui.debug("taking initial sample\n")
             samplefunc = _takefullsample
             targetsize = fullsamplesize
         else:
             # use even cheaper initial sample
             ui.debug("taking quick initial sample\n")
             samplefunc = _takequicksample
             targetsize = initialsamplesize
         if len(undecided) < targetsize:
             sample = list(undecided)
         else:
             sample = samplefunc(dag, undecided, targetsize)

         roundtrips += 1
-        ui.progress(_('searching'), roundtrips, unit=_('queries'))
+        progress.update(roundtrips)
         ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                  % (roundtrips, len(undecided), len(sample)))
         # indices between sample and externalized version must match
         sample = list(sample)

         with remote.commandexecutor() as e:
             yesno = e.callcommand('known', {
                 'nodes': dag.externalizeall(sample),
             }).result()

         full = True

         if sample:
             commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
             common.addbases(commoninsample)
             common.removeancestorsfrom(undecided)

     # heads(common) == heads(common.bases) since common represents common.bases
     # and all its ancestors
     result = dag.headsetofconnecteds(common.bases)
     # common.bases can include nullrev, but our contract requires us to not
     # return any heads in that case, so discard that
     result.discard(nullrev)
     elapsed = util.timer() - start
-    ui.progress(_('searching'), None)
+    progress.update(None)
     ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
     msg = ('found %d common and %d unknown server heads,'
            ' %d roundtrips in %.4fs\n')
     missing = set(result) - set(srvheads)
     ui.log('discovery', msg, len(result), len(missing), roundtrips,
            elapsed)

     if not result and srvheadhashes != [nullid]:
         if abortwhenunrelated:
             raise error.Abort(_("repository is unrelated"))
         else:
             ui.warn(_("warning: repository is unrelated\n"))
             return ({nullid}, True, srvheadhashes,)

     anyincoming = (srvheadhashes != [nullid])
     return dag.externalizeall(result), anyincoming, srvheadhashes
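
The module docstring above describes discovery in terms of three sets (common, missing, and an undecided remainder) refined one `known()` round trip at a time. As a rough, self-contained illustration of that loop only, here is a toy sketch; it is not Mercurial code, and the names (parents, remote_has, ancestors, descendants) are invented for the example.

# Toy model of the discovery loop from the docstring: classify every local
# node as common (remote has it) or missing (remote lacks it), asking the
# remote only "which of these nodes do you know?" once per round trip.
import random

parents = {n: [n - 1] for n in range(1, 10)}   # toy linear DAG 0 <- 1 <- ... <- 9
parents[0] = []
remote_has = set(range(0, 6))                  # pretend the remote only has 0..5

def ancestors(node):
    """Return node plus all of its ancestors in the toy DAG."""
    seen, stack = set(), [node]
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents[n])
    return seen

def descendants(node):
    """Return node plus all of its descendants in the toy DAG."""
    return {m for m in parents if node in ancestors(m)}

common, missing = set(), set()
undecided = set(parents)
roundtrips = 0
while undecided:
    sample = random.sample(sorted(undecided), min(3, len(undecided)))
    yesno = [n in remote_has for n in sample]  # one `known`-style round trip
    roundtrips += 1
    for n, known in zip(sample, yesno):
        if known:
            common |= ancestors(n)      # known node => all its ancestors are common
        else:
            missing |= descendants(n)   # unknown node => all its descendants are missing
    undecided -= common | missing

print(sorted(common), sorted(missing), roundtrips)

Each round trip settles every sampled node plus its ancestors or descendants, which is why the real implementation spends its effort on picking samples that classify as many undecided nodes as possible.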
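
_updatesample builds its samples by walking back from each head and keeping nodes at exponentially growing distances (H~1, H~2, H~4, ...), so a handful of probes covers both recent and old history. A simplified stand-alone sketch of that idea on a toy linear history follows; exponential_sample and parent_of are names made up for this example, not part of the code above.

def exponential_sample(head, parent_of, limit=None):
    """Keep the head and the nodes 1, 2, 4, 8, ... parents behind it."""
    sample = []
    node, dist, target = head, 0, 1
    while node is not None:
        if dist == 0 or dist == target:
            sample.append(node)
            if dist:
                target *= 2                      # next probe twice as far back
            if limit is not None and len(sample) >= limit:
                break
        node = parent_of.get(node)
        dist += 1
    return sample

# toy linear history: 0 <- 1 <- ... <- 20, where parent_of[n] is n - 1
parent_of = {n: n - 1 for n in range(1, 21)}
parent_of[0] = None
print(exponential_sample(20, parent_of))         # [20, 19, 18, 16, 12, 4]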