@@ -1,293 +1,313 @@
 # setdiscovery.py - improved discovery of common nodeset for mercurial
 #
 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 """
 Algorithm works in the following way. You have two repository: local and
 remote. They both contains a DAG of changelists.
 
 The goal of the discovery protocol is to find one set of node *common*,
 the set of nodes shared by local and remote.
 
 One of the issue with the original protocol was latency, it could
 potentially require lots of roundtrips to discover that the local repo was a
 subset of remote (which is a very common case, you usually have few changes
 compared to upstream, while upstream probably had lots of development).
 
 The new protocol only requires one interface for the remote repo: `known()`,
 which given a set of changelists tells you if they are present in the DAG.
 
 The algorithm then works as follow:
 
  - We will be using three sets, `common`, `missing`, `unknown`. Originally
    all nodes are in `unknown`.
  - Take a sample from `unknown`, call `remote.known(sample)`
    - For each node that remote knows, move it and all its ancestors to `common`
    - For each node that remote doesn't know, move it and all its descendants
      to `missing`
  - Iterate until `unknown` is empty
 
 There are a couple optimizations, first is instead of starting with a random
 sample of missing, start by sending all heads, in the case where the local
 repo is a subset, you computed the answer in one round trip.
 
 Then you can do something similar to the bisecting strategy used when
 finding faulty changesets. Instead of random samples, you can try picking
 nodes that will maximize the number of nodes that will be
 classified with it (since all ancestors or descendants will be marked as well).
 """
 
 from __future__ import absolute_import
 
 import collections
 import random
 
 from .i18n import _
 from .node import (
     nullid,
     nullrev,
 )
 from . import (
     error,
     util,
 )
 
 def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
     """update an existing sample to match the expected size
 
     The sample is updated with revs exponentially distant from each head of the
     <revs> set. (H~1, H~2, H~4, H~8, etc).
 
     If a target size is specified, the sampling will stop once this size is
     reached. Otherwise sampling will happen until roots of the <revs> set are
     reached.
 
     :revs: set of revs we want to discover (if None, assume the whole dag)
     :heads: set of DAG head revs
     :sample: a sample to update
     :parentfn: a callable to resolve parents for a revision
     :quicksamplesize: optional target size of the sample"""
     dist = {}
     visit = collections.deque(heads)
     seen = set()
     factor = 1
     while visit:
         curr = visit.popleft()
         if curr in seen:
             continue
         d = dist.setdefault(curr, 1)
         if d > factor:
             factor *= 2
         if d == factor:
             sample.add(curr)
             if quicksamplesize and (len(sample) >= quicksamplesize):
                 return
         seen.add(curr)
 
         for p in parentfn(curr):
             if p != nullrev and (not revs or p in revs):
                 dist.setdefault(p, d + 1)
                 visit.append(p)
 
 def _takequicksample(repo, headrevs, revs, size):
     """takes a quick sample of size <size>
 
     It is meant for initial sampling and focuses on querying heads and close
     ancestors of heads.
 
     :dag: a dag object
     :headrevs: set of head revisions in local DAG to consider
     :revs: set of revs to discover
     :size: the maximum size of the sample"""
     sample = set(repo.revs('heads(%ld)', revs))
 
     if len(sample) >= size:
         return _limitsample(sample, size)
 
     _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
                   quicksamplesize=size)
     return sample
 
 def _takefullsample(repo, headrevs, revs, size):
     sample = set(repo.revs('heads(%ld)', revs))
 
     # update from heads
     revsheads = set(repo.revs('heads(%ld)', revs))
     _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)
+
     # update from roots
     revsroots = set(repo.revs('roots(%ld)', revs))
 
-    # TODO this is quadratic
-    parentfn = lambda rev: repo.changelog.children(repo.changelog.node(rev))
+    # _updatesample() essentially does interaction over revisions to look up
+    # their children. This lookup is expensive and doing it in a loop is
+    # quadratic. We precompute the children for all relevant revisions and
+    # make the lookup in _updatesample() a simple dict lookup.
+    #
+    # Because this function can be called multiple times during discovery, we
+    # may still perform redundant work and there is room to optimize this by
+    # keeping a persistent cache of children across invocations.
+    children = {}
 
-    _updatesample(revs, revsroots, sample, parentfn)
+    parentrevs = repo.changelog.parentrevs
+    for rev in repo.changelog.revs(start=min(revsroots)):
+        # Always ensure revision has an entry so we don't need to worry about
+        # missing keys.
+        children.setdefault(rev, [])
+
+        for prev in parentrevs(rev):
+            if prev == nullrev:
+                continue
+
+            children.setdefault(prev, []).append(rev)
+
+    _updatesample(revs, revsroots, sample, children.__getitem__)
     assert sample
     sample = _limitsample(sample, size)
     if len(sample) < size:
         more = size - len(sample)
         sample.update(random.sample(list(revs - sample), more))
     return sample
 
 def _limitsample(sample, desiredlen):
     """return a random subset of sample of at most desiredlen item"""
     if len(sample) > desiredlen:
         sample = set(random.sample(sample, desiredlen))
     return sample
 
 def findcommonheads(ui, local, remote,
                     initialsamplesize=100,
                     fullsamplesize=200,
                     abortwhenunrelated=True,
                     ancestorsof=None):
     '''Return a tuple (common, anyincoming, remoteheads) used to identify
     missing nodes from or in remote.
     '''
     start = util.timer()
 
     roundtrips = 0
     cl = local.changelog
     clnode = cl.node
     clrev = cl.rev
 
     if ancestorsof is not None:
         ownheads = [clrev(n) for n in ancestorsof]
     else:
         ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
 
     # early exit if we know all the specified remote heads already
     ui.debug("query 1; heads\n")
     roundtrips += 1
     sample = _limitsample(ownheads, initialsamplesize)
     # indices between sample and externalized version must match
     sample = list(sample)
 
     with remote.commandexecutor() as e:
         fheads = e.callcommand('heads', {})
         fknown = e.callcommand('known', {
             'nodes': [clnode(r) for r in sample],
         })
 
     srvheadhashes, yesno = fheads.result(), fknown.result()
 
     if cl.tip() == nullid:
         if srvheadhashes != [nullid]:
             return [nullid], True, srvheadhashes
         return [nullid], False, []
 
     # start actual discovery (we note this before the next "if" for
     # compatibility reasons)
     ui.status(_("searching for changes\n"))
 
     srvheads = []
     for node in srvheadhashes:
         if node == nullid:
             continue
 
         try:
             srvheads.append(clrev(node))
         # Catches unknown and filtered nodes.
         except error.LookupError:
             continue
 
     if len(srvheads) == len(srvheadhashes):
         ui.debug("all remote heads known locally\n")
         return srvheadhashes, False, srvheadhashes
 
     if len(sample) == len(ownheads) and all(yesno):
         ui.note(_("all local heads known remotely\n"))
         ownheadhashes = [clnode(r) for r in ownheads]
         return ownheadhashes, True, srvheadhashes
 
     # full blown discovery
 
     # own nodes I know we both know
     # treat remote heads (and maybe own heads) as a first implicit sample
     # response
     common = cl.incrementalmissingrevs(srvheads)
     commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
     common.addbases(commoninsample)
     # own nodes where I don't know if remote knows them
     undecided = set(common.missingancestors(ownheads))
     # own nodes I know remote lacks
     missing = set()
 
     full = False
     progress = ui.makeprogress(_('searching'), unit=_('queries'))
     while undecided:
 
         if sample:
             missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
 
             if missing:
                 missing.update(local.revs('descendants(%ld) - descendants(%ld)',
                                           missinginsample, missing))
             else:
                 missing.update(local.revs('descendants(%ld)', missinginsample))
 
             undecided.difference_update(missing)
 
         if not undecided:
             break
 
         if full or common.hasbases():
             if full:
                 ui.note(_("sampling from both directions\n"))
             else:
                 ui.debug("taking initial sample\n")
             samplefunc = _takefullsample
             targetsize = fullsamplesize
         else:
             # use even cheaper initial sample
             ui.debug("taking quick initial sample\n")
             samplefunc = _takequicksample
             targetsize = initialsamplesize
         if len(undecided) < targetsize:
             sample = list(undecided)
         else:
             sample = samplefunc(local, ownheads, undecided, targetsize)
 
         roundtrips += 1
         progress.update(roundtrips)
         ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                  % (roundtrips, len(undecided), len(sample)))
         # indices between sample and externalized version must match
         sample = list(sample)
 
         with remote.commandexecutor() as e:
             yesno = e.callcommand('known', {
                 'nodes': [clnode(r) for r in sample],
             }).result()
 
         full = True
 
         if sample:
             commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
             common.addbases(commoninsample)
             common.removeancestorsfrom(undecided)
 
     # heads(common) == heads(common.bases) since common represents common.bases
     # and all its ancestors
     # The presence of nullrev will confuse heads(). So filter it out.
     result = set(local.revs('heads(%ld)', common.bases - {nullrev}))
     elapsed = util.timer() - start
     progress.complete()
     ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
     msg = ('found %d common and %d unknown server heads,'
            ' %d roundtrips in %.4fs\n')
     missing = set(result) - set(srvheads)
     ui.log('discovery', msg, len(result), len(missing), roundtrips,
            elapsed)
 
     if not result and srvheadhashes != [nullid]:
         if abortwhenunrelated:
             raise error.Abort(_("repository is unrelated"))
         else:
             ui.warn(_("warning: repository is unrelated\n"))
         return ({nullid}, True, srvheadhashes,)
 
     anyincoming = (srvheadhashes != [nullid])
     result = {clnode(r) for r in result}
     return result, anyincoming, srvheadhashes
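The core of this change is in _takefullsample(): instead of asking the changelog for the children of each revision inside the sampling walk (the old TODO flagged this as quadratic), it builds a rev-to-children mapping in one linear pass and hands its __getitem__ to _updatesample(). The sketch below is a minimal, standalone illustration of that precomputation; the toy _parents table, the revision numbers, and the local parentrevs() function are invented for the example and merely stand in for repo.changelog.parentrevs.

nullrev = -1   # same sentinel Mercurial uses for "no parent"

# Toy revision graph: rev -> (p1, p2); rev 2 is a merge of 0 and 1.
_parents = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (0, 1), 3: (2, nullrev)}

def parentrevs(rev):
    """Stand-in for repo.changelog.parentrevs in this illustration."""
    return _parents[rev]

# One linear pass: invert the parent pointers into a rev -> children map.
children = {}
for rev in sorted(_parents):
    children.setdefault(rev, [])        # every rev gets an entry, even leaves
    for prev in parentrevs(rev):
        if prev == nullrev:
            continue
        children.setdefault(prev, []).append(rev)

print(children)   # {0: [1, 2], 1: [2], 2: [3], 3: []}

# children.__getitem__ is now an O(1) callback shaped like the "parentfn"
# argument _updatesample() expects, replacing the repeated children lookups
# the old lambda performed for every visited revision.

Trading one pass over the relevant revisions for constant-time lookups removes the quadratic behaviour, at the cost of rebuilding the map on every call, which the new comment in the patch acknowledges could still be improved with a persistent cache.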
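For readers new to the algorithm summarized in the module docstring, here is a small, self-contained sketch of the classify-and-iterate loop on a toy DAG. The node names, the remote_known() oracle, and the ancestors()/descendants() helpers are all made up for illustration; the real implementation works on revision numbers through repo.revs(), known(), and incrementalmissingrevs().

import random

# Toy DAG: node -> list of parent nodes ('a' is the root, 'c' has two children).
parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['c'], 'e': ['c']}

def ancestors(node):
    """Return node plus all of its ancestors."""
    seen, stack = set(), [node]
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents[n])
    return seen

def descendants(node):
    """Return node plus all of its descendants."""
    return {n for n in parents if node in ancestors(n)}

def remote_known(nodes):
    """Stand-in for the remote's known() command: the remote only has a, b, c."""
    remote = {'a', 'b', 'c'}
    return {n: n in remote for n in nodes}

unknown = set(parents)            # every local node starts out undecided
common, missing = set(), set()
while unknown:
    sample = random.sample(sorted(unknown), min(2, len(unknown)))
    for node, known in remote_known(sample).items():
        if known:
            common |= ancestors(node)      # known => all its ancestors are common
        else:
            missing |= descendants(node)   # unknown => all its descendants are missing
    unknown -= common | missing

print(sorted(common), sorted(missing))     # ['a', 'b', 'c'] ['d', 'e']

Each answer from the remote settles an entire ancestor or descendant cone, which is why the undecided set shrinks quickly and only a handful of round trips are usually needed.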
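_updatesample()'s docstring describes keeping revisions "exponentially distant" from each head (H~1, H~2, H~4, H~8, ...). A simplified sketch of that idea on an invented linear history, reduced to a single head and a single-parent chain (the real function walks breadth-first from every head at once and handles merges):

def exponential_sample(head, parent_of):
    """Keep the revisions at distance 1, 2, 4, 8, ... walking back from head."""
    sample, dist, factor = [], 1, 1
    curr = head
    while curr is not None:
        if dist > factor:
            factor *= 2
        if dist == factor:
            sample.append(curr)
        curr = parent_of(curr)
        dist += 1
    return sample

# Invented linear history: rev 0 <- 1 <- ... <- 19, where rev 0 has no parent.
parent_of = lambda rev: rev - 1 if rev > 0 else None

print(exponential_sample(19, parent_of))   # [19, 18, 16, 12, 4]

Twenty revisions collapse into five probe points, which is how a sample capped at a few hundred revisions can still cover a large undecided set in each query.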