setdiscovery: always add exponential sample to the heads...
Pierre-Yves David
r23813:932f814b default
@@ -1,258 +1,255 b''
1 1 # setdiscovery.py - improved discovery of common nodeset for mercurial
2 2 #
3 3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """
9 9 The algorithm works in the following way. You have two repositories:
10 10 local and remote. They both contain a DAG of changesets.
11 11
12 12 The goal of the discovery protocol is to find one set of nodes, *common*,
13 13 the set of nodes shared by local and remote.
14 14
15 15 One of the issues with the original protocol was latency: it could
16 16 potentially require lots of roundtrips to discover that the local repo was a
17 17 subset of remote (which is a very common case; you usually have few changes
18 18 compared to upstream, while upstream probably had lots of development).
19 19
20 20 The new protocol only requires one interface for the remote repo: `known()`,
21 21 which, given a set of changesets, tells you whether they are present in the DAG.
22 22
23 23 The algorithm then works as follows:
24 24
25 25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
26 26 all nodes are in `unknown`.
27 27 - Take a sample from `unknown`, call `remote.known(sample)`
28 28 - For each node that remote knows, move it and all its ancestors to `common`
29 29 - For each node that remote doesn't know, move it and all its descendants
30 30 to `missing`
31 31 - Iterate until `unknown` is empty
32 32
33 33 There are a couple of optimizations. First, instead of starting with a random
34 34 sample of missing, start by sending all heads; in the case where the local
35 35 repo is a subset, you compute the answer in one round trip.
36 36
37 37 Second, you can do something similar to the bisecting strategy used when
38 38 finding faulty changesets. Instead of random samples, you can try picking
39 39 nodes that will maximize the number of nodes that will be
40 40 classified with them (since all ancestors or descendants will be marked as well).
41 41 """
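To make the loop described above concrete, here is a minimal runnable sketch of the classification scheme on a toy DAG. Everything in it (the `parents` dict, `toy_discovery`, the `remote_knows` predicate) is an illustrative stand-in rather than Mercurial's API; the real logic lives in `findcommonheads` below.

    import random

    def toy_discovery(parents, nodes, remote_knows, samplesize=2):
        # Classify every node as common or missing using only remote_knows().
        def ancestors(node):
            seen, stack = set(), [node]
            while stack:
                n = stack.pop()
                if n not in seen:
                    seen.add(n)
                    stack.extend(parents.get(n, ()))
            return seen

        common, missing = set(), set()
        unknown = set(nodes)
        while unknown:
            # the real code picks its samples far more carefully (see below)
            sample = random.sample(sorted(unknown),
                                   min(samplesize, len(unknown)))
            for node in sample:
                if remote_knows(node):
                    # remote has it: it and all its ancestors are common
                    common.update(ancestors(node))
                else:
                    # remote lacks it: it and all its descendants are missing
                    missing.update(n for n in nodes if node in ancestors(n))
            unknown -= common | missing
        return common, missing

    # With a three-node chain a -> b -> c and a remote that only has 'a',
    # the loop converges to common == {'a'} and missing == {'b', 'c'}.
    common, missing = toy_discovery({'b': ['a'], 'c': ['b']},
                                    ['a', 'b', 'c'], lambda n: n == 'a')
    assert common == set(['a']) and missing == set(['b', 'c'])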
42 42
43 43 from node import nullid, nullrev
44 44 from i18n import _
45 45 import random
46 46 import util, dagutil
47 47
48 48 def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
49 49 """update an existing sample to match the expected size
50 50
51 51 The sample is updated with nodes exponentially distant from each head of the
52 52 <nodes> set (H~1, H~2, H~4, H~8, etc.).
53 53
54 54 If a target size is specified, the sampling will stop once this size is
55 55 reached. Otherwise, sampling continues until the roots of the <nodes> set are
56 56 reached.
57 57
58 58 :dag: a dag object from dagutil
59 59 :nodes: set of nodes we want to discover (if None, assume the whole dag)
60 60 :sample: a sample to update
61 61 :always: set of notable nodes that will be part of the sample anyway
62 62 :quicksamplesize: optional target size of the sample"""
63 63 # if nodes is empty we scan the entire graph
64 64 if nodes:
65 65 heads = dag.headsetofconnecteds(nodes)
66 66 else:
67 67 heads = dag.heads()
68 68 dist = {}
69 69 visit = util.deque(heads)
70 70 seen = set()
71 71 factor = 1
72 72 while visit:
73 73 curr = visit.popleft()
74 74 if curr in seen:
75 75 continue
76 76 d = dist.setdefault(curr, 1)
77 77 if d > factor:
78 78 factor *= 2
79 79 if d == factor:
80 80 if curr not in always: # need this check for the early exit below
81 81 sample.add(curr)
82 82 if quicksamplesize and (len(sample) >= quicksamplesize):
83 83 return
84 84 seen.add(curr)
85 85 for p in dag.parents(curr):
86 86 if not nodes or p in nodes:
87 87 dist.setdefault(p, d + 1)
88 88 visit.append(p)
89 89
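The distance bookkeeping above is what produces the H~1, H~2, H~4, ... pattern from the docstring. A stripped-down re-derivation on a single linear chain (just the dist/factor arithmetic, none of the dag plumbing) shows which depths enter the sample; note that in the real function the heads themselves (distance 1) are usually already covered by the `always` set.

    def exponential_offsets(chainlength):
        # distances from the head that the doubling of `factor` keeps
        offsets = []
        factor = 1
        for d in range(1, chainlength + 1):
            if d > factor:
                factor *= 2
            if d == factor:
                offsets.append(d)
        return offsets

    # On a 20-changeset chain the sampled depths are logarithmic in the
    # length of the history:
    assert exponential_offsets(20) == [1, 2, 4, 8, 16]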
90 90 def _setupsample(dag, nodes, size):
91 91 always = dag.headsetofconnecteds(nodes)
92 92 desiredlen = size - len(always)
93 93 if desiredlen <= 0:
94 94 # This could be bad if there are very many heads, all unknown to the
95 95 # server. We're counting on long request support here.
96 96 return always, None, desiredlen
97 97 return always, set(), desiredlen
98 98
99 99 def _takequicksample(dag, nodes, size):
100 100 always, sample, desiredlen = _setupsample(dag, nodes, size)
101 101 if sample is None:
102 102 return always
103 103 _updatesample(dag, None, sample, always, quicksamplesize=desiredlen)
104 104 sample.update(always)
105 105 return sample
106 106
107 107 def _takefullsample(dag, nodes, size):
108 always = dag.headsetofconnecteds(nodes)
109 if size <= len(always):
110 return always
111 sample = always
108 sample = always = dag.headsetofconnecteds(nodes)
112 109 # update from heads
113 110 _updatesample(dag, nodes, sample, always)
114 111 # update from roots
115 112 _updatesample(dag.inverse(), nodes, sample, always)
116 113 assert sample
117 114 sample.update(always)
118 115 sample = _limitsample(sample, size)
119 116 if len(sample) < size:
120 117 more = size - len(sample)
121 118 sample.update(random.sample(list(nodes - sample), more))
122 119 return sample
123 120
124 121 def _limitsample(sample, desiredlen):
125 122 """return a random subset of sample of at most desiredlen item"""
126 123 if len(sample) > desiredlen:
127 124 sample = set(random.sample(sample, desiredlen))
128 125 return sample
129 126
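A quick usage sketch for `_limitsample` (toy values, assuming the function is in scope): it is a thin wrapper around `random.sample` that leaves already-small samples untouched.

    sample = set(range(10))
    assert _limitsample(sample, 20) is sample    # small enough: unchanged
    assert len(_limitsample(sample, 4)) == 4     # trimmed to a random subset
    assert _limitsample(sample, 4) <= sample     # always a subset of the input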
130 127 def findcommonheads(ui, local, remote,
131 128 initialsamplesize=100,
132 129 fullsamplesize=200,
133 130 abortwhenunrelated=True):
134 131 '''Return a tuple (common, anyincoming, remoteheads) used to identify
135 132 missing nodes from or in remote.
136 133 '''
137 134 roundtrips = 0
138 135 cl = local.changelog
139 136 dag = dagutil.revlogdag(cl)
140 137
141 138 # early exit if we know all the specified remote heads already
142 139 ui.debug("query 1; heads\n")
143 140 roundtrips += 1
144 141 ownheads = dag.heads()
145 142 sample = _limitsample(ownheads, initialsamplesize)
146 143 # indices between sample and externalized version must match
147 144 sample = list(sample)
148 145 if remote.local():
149 146 # stopgap until we have a proper localpeer that supports batch()
150 147 srvheadhashes = remote.heads()
151 148 yesno = remote.known(dag.externalizeall(sample))
152 149 elif remote.capable('batch'):
153 150 batch = remote.batch()
154 151 srvheadhashesref = batch.heads()
155 152 yesnoref = batch.known(dag.externalizeall(sample))
156 153 batch.submit()
157 154 srvheadhashes = srvheadhashesref.value
158 155 yesno = yesnoref.value
159 156 else:
160 157 # compatibility with pre-batch, but post-known remotes during 1.9
161 158 # development
162 159 srvheadhashes = remote.heads()
163 160 sample = []
164 161
165 162 if cl.tip() == nullid:
166 163 if srvheadhashes != [nullid]:
167 164 return [nullid], True, srvheadhashes
168 165 return [nullid], False, []
169 166
170 167 # start actual discovery (we note this before the next "if" for
171 168 # compatibility reasons)
172 169 ui.status(_("searching for changes\n"))
173 170
174 171 srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
175 172 if len(srvheads) == len(srvheadhashes):
176 173 ui.debug("all remote heads known locally\n")
177 174 return (srvheadhashes, False, srvheadhashes,)
178 175
179 176 if sample and len(ownheads) <= initialsamplesize and util.all(yesno):
180 177 ui.note(_("all local heads known remotely\n"))
181 178 ownheadhashes = dag.externalizeall(ownheads)
182 179 return (ownheadhashes, True, srvheadhashes,)
183 180
184 181 # full blown discovery
185 182
186 183 # own nodes I know we both know
187 184 # treat remote heads (and maybe own heads) as a first implicit sample
188 185 # response
189 186 common = cl.incrementalmissingrevs(srvheads)
190 187 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
191 188 common.addbases(commoninsample)
192 189 # own nodes where I don't know if remote knows them
193 190 undecided = set(common.missingancestors(ownheads))
194 191 # own nodes I know remote lacks
195 192 missing = set()
196 193
197 194 full = False
198 195 while undecided:
199 196
200 197 if sample:
201 198 missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
202 199 missing.update(dag.descendantset(missinginsample, missing))
203 200
204 201 undecided.difference_update(missing)
205 202
206 203 if not undecided:
207 204 break
208 205
209 206 if full or common.hasbases():
210 207 if full:
211 208 ui.note(_("sampling from both directions\n"))
212 209 else:
213 210 ui.debug("taking initial sample\n")
214 211 samplefunc = _takefullsample
215 212 targetsize = fullsamplesize
216 213 else:
217 214 # use even cheaper initial sample
218 215 ui.debug("taking quick initial sample\n")
219 216 samplefunc = _takequicksample
220 217 targetsize = initialsamplesize
221 218 if len(undecided) < targetsize:
222 219 sample = list(undecided)
223 220 else:
224 221 sample = samplefunc(dag, undecided, targetsize)
225 222 sample = _limitsample(sample, targetsize)
226 223
227 224 roundtrips += 1
228 225 ui.progress(_('searching'), roundtrips, unit=_('queries'))
229 226 ui.debug("query %i; still undecided: %i, sample size is: %i\n"
230 227 % (roundtrips, len(undecided), len(sample)))
231 228 # indices between sample and externalized version must match
232 229 sample = list(sample)
233 230 yesno = remote.known(dag.externalizeall(sample))
234 231 full = True
235 232
236 233 if sample:
237 234 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
238 235 common.addbases(commoninsample)
239 236 common.removeancestorsfrom(undecided)
240 237
241 238 # heads(common) == heads(common.bases) since common represents common.bases
242 239 # and all its ancestors
243 240 result = dag.headsetofconnecteds(common.bases)
244 241 # common.bases can include nullrev, but our contract requires us to not
245 242 # return any heads in that case, so discard that
246 243 result.discard(nullrev)
247 244 ui.progress(_('searching'), None)
248 245 ui.debug("%d total queries\n" % roundtrips)
249 246
250 247 if not result and srvheadhashes != [nullid]:
251 248 if abortwhenunrelated:
252 249 raise util.Abort(_("repository is unrelated"))
253 250 else:
254 251 ui.warn(_("warning: repository is unrelated\n"))
255 252 return (set([nullid]), True, srvheadhashes,)
256 253
257 254 anyincoming = (srvheadhashes != [nullid])
258 255 return dag.externalizeall(result), anyincoming, srvheadhashes
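All three remote-access paths in `findcommonheads` (the `remote.local()` shortcut, the `batch()` path, and the pre-batch fallback) rely on the same two wire commands, `heads()` and `known()`. A toy in-memory peer sketching that minimal contract (illustrative only, not Mercurial's real peer class):

    class ToyPeer(object):
        # illustrative stand-in honouring the heads()/known() contract
        def __init__(self, nodes, headnodes):
            self._nodes = set(nodes)
            self._heads = list(headnodes)

        def heads(self):
            # hashes of all heads of the remote repository
            return self._heads

        def known(self, nodes):
            # one boolean per queried node, preserving query order
            return [n in self._nodes for n in nodes]

    peer = ToyPeer(['f', 'a1', 'a2'], ['a2'])
    assert peer.heads() == ['a2']
    assert peer.known(['a1', 'b1']) == [True, False]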
@@ -1,409 +1,406 b''
1 1
2 2 Function to test discovery between two repos in both directions, using both the local shortcut
3 3 (which is currently not activated by default) and the full remotable protocol:
4 4
5 5 $ testdesc() { # revs_a, revs_b, dagdesc
6 6 > if [ -d foo ]; then rm -rf foo; fi
7 7 > hg init foo
8 8 > cd foo
9 9 > hg debugbuilddag "$3"
10 10 > hg clone . a $1 --quiet
11 11 > hg clone . b $2 --quiet
12 12 > echo
13 13 > echo "% -- a -> b tree"
14 14 > hg -R a debugdiscovery b --verbose --old
15 15 > echo
16 16 > echo "% -- a -> b set"
17 17 > hg -R a debugdiscovery b --verbose --debug
18 18 > echo
19 19 > echo "% -- b -> a tree"
20 20 > hg -R b debugdiscovery a --verbose --old
21 21 > echo
22 22 > echo "% -- b -> a set"
23 23 > hg -R b debugdiscovery a --verbose --debug
24 24 > cd ..
25 25 > }
26 26
27 27
28 28 Small superset:
29 29
30 30 $ testdesc '-ra1 -ra2' '-rb1 -rb2 -rb3' '
31 31 > +2:f +1:a1:b1
32 32 > <f +4 :a2
33 33 > +5 :b2
34 34 > <f +3 :b3'
35 35
36 36 % -- a -> b tree
37 37 comparing with b
38 38 searching for changes
39 39 unpruned common: 01241442b3c2 66f7d451a68b b5714e113bc0
40 40 common heads: 01241442b3c2 b5714e113bc0
41 41 local is subset
42 42
43 43 % -- a -> b set
44 44 comparing with b
45 45 query 1; heads
46 46 searching for changes
47 47 all local heads known remotely
48 48 common heads: 01241442b3c2 b5714e113bc0
49 49 local is subset
50 50
51 51 % -- b -> a tree
52 52 comparing with a
53 53 searching for changes
54 54 unpruned common: 01241442b3c2 b5714e113bc0
55 55 common heads: 01241442b3c2 b5714e113bc0
56 56 remote is subset
57 57
58 58 % -- b -> a set
59 59 comparing with a
60 60 query 1; heads
61 61 searching for changes
62 62 all remote heads known locally
63 63 common heads: 01241442b3c2 b5714e113bc0
64 64 remote is subset
65 65
66 66
67 67 Many new:
68 68
69 69 $ testdesc '-ra1 -ra2' '-rb' '
70 70 > +2:f +3:a1 +3:b
71 71 > <f +30 :a2'
72 72
73 73 % -- a -> b tree
74 74 comparing with b
75 75 searching for changes
76 76 unpruned common: bebd167eb94d
77 77 common heads: bebd167eb94d
78 78
79 79 % -- a -> b set
80 80 comparing with b
81 81 query 1; heads
82 82 searching for changes
83 83 taking initial sample
84 84 searching: 2 queries
85 85 query 2; still undecided: 29, sample size is: 29
86 86 2 total queries
87 87 common heads: bebd167eb94d
88 88
89 89 % -- b -> a tree
90 90 comparing with a
91 91 searching for changes
92 92 unpruned common: 66f7d451a68b bebd167eb94d
93 93 common heads: bebd167eb94d
94 94
95 95 % -- b -> a set
96 96 comparing with a
97 97 query 1; heads
98 98 searching for changes
99 99 taking initial sample
100 100 searching: 2 queries
101 101 query 2; still undecided: 2, sample size is: 2
102 102 2 total queries
103 103 common heads: bebd167eb94d
104 104
105 105
106 106 Both sides many new with stub:
107 107
108 108 $ testdesc '-ra1 -ra2' '-rb' '
109 109 > +2:f +2:a1 +30 :b
110 110 > <f +30 :a2'
111 111
112 112 % -- a -> b tree
113 113 comparing with b
114 114 searching for changes
115 115 unpruned common: 2dc09a01254d
116 116 common heads: 2dc09a01254d
117 117
118 118 % -- a -> b set
119 119 comparing with b
120 120 query 1; heads
121 121 searching for changes
122 122 taking initial sample
123 123 searching: 2 queries
124 124 query 2; still undecided: 29, sample size is: 29
125 125 2 total queries
126 126 common heads: 2dc09a01254d
127 127
128 128 % -- b -> a tree
129 129 comparing with a
130 130 searching for changes
131 131 unpruned common: 2dc09a01254d 66f7d451a68b
132 132 common heads: 2dc09a01254d
133 133
134 134 % -- b -> a set
135 135 comparing with a
136 136 query 1; heads
137 137 searching for changes
138 138 taking initial sample
139 139 searching: 2 queries
140 140 query 2; still undecided: 29, sample size is: 29
141 141 2 total queries
142 142 common heads: 2dc09a01254d
143 143
144 144
145 145 Both many new:
146 146
147 147 $ testdesc '-ra' '-rb' '
148 148 > +2:f +30 :b
149 149 > <f +30 :a'
150 150
151 151 % -- a -> b tree
152 152 comparing with b
153 153 searching for changes
154 154 unpruned common: 66f7d451a68b
155 155 common heads: 66f7d451a68b
156 156
157 157 % -- a -> b set
158 158 comparing with b
159 159 query 1; heads
160 160 searching for changes
161 161 taking quick initial sample
162 162 searching: 2 queries
163 163 query 2; still undecided: 31, sample size is: 31
164 164 2 total queries
165 165 common heads: 66f7d451a68b
166 166
167 167 % -- b -> a tree
168 168 comparing with a
169 169 searching for changes
170 170 unpruned common: 66f7d451a68b
171 171 common heads: 66f7d451a68b
172 172
173 173 % -- b -> a set
174 174 comparing with a
175 175 query 1; heads
176 176 searching for changes
177 177 taking quick initial sample
178 178 searching: 2 queries
179 179 query 2; still undecided: 31, sample size is: 31
180 180 2 total queries
181 181 common heads: 66f7d451a68b
182 182
183 183
184 184 Both many new skewed:
185 185
186 186 $ testdesc '-ra' '-rb' '
187 187 > +2:f +30 :b
188 188 > <f +50 :a'
189 189
190 190 % -- a -> b tree
191 191 comparing with b
192 192 searching for changes
193 193 unpruned common: 66f7d451a68b
194 194 common heads: 66f7d451a68b
195 195
196 196 % -- a -> b set
197 197 comparing with b
198 198 query 1; heads
199 199 searching for changes
200 200 taking quick initial sample
201 201 searching: 2 queries
202 202 query 2; still undecided: 51, sample size is: 51
203 203 2 total queries
204 204 common heads: 66f7d451a68b
205 205
206 206 % -- b -> a tree
207 207 comparing with a
208 208 searching for changes
209 209 unpruned common: 66f7d451a68b
210 210 common heads: 66f7d451a68b
211 211
212 212 % -- b -> a set
213 213 comparing with a
214 214 query 1; heads
215 215 searching for changes
216 216 taking quick initial sample
217 217 searching: 2 queries
218 218 query 2; still undecided: 31, sample size is: 31
219 219 2 total queries
220 220 common heads: 66f7d451a68b
221 221
222 222
223 223 Both many new on top of long history:
224 224
225 225 $ testdesc '-ra' '-rb' '
226 226 > +1000:f +30 :b
227 227 > <f +50 :a'
228 228
229 229 % -- a -> b tree
230 230 comparing with b
231 231 searching for changes
232 232 unpruned common: 7ead0cba2838
233 233 common heads: 7ead0cba2838
234 234
235 235 % -- a -> b set
236 236 comparing with b
237 237 query 1; heads
238 238 searching for changes
239 239 taking quick initial sample
240 240 searching: 2 queries
241 241 query 2; still undecided: 1049, sample size is: 11
242 242 sampling from both directions
243 243 searching: 3 queries
244 244 query 3; still undecided: 31, sample size is: 31
245 245 3 total queries
246 246 common heads: 7ead0cba2838
247 247
248 248 % -- b -> a tree
249 249 comparing with a
250 250 searching for changes
251 251 unpruned common: 7ead0cba2838
252 252 common heads: 7ead0cba2838
253 253
254 254 % -- b -> a set
255 255 comparing with a
256 256 query 1; heads
257 257 searching for changes
258 258 taking quick initial sample
259 259 searching: 2 queries
260 260 query 2; still undecided: 1029, sample size is: 11
261 261 sampling from both directions
262 262 searching: 3 queries
263 263 query 3; still undecided: 15, sample size is: 15
264 264 3 total queries
265 265 common heads: 7ead0cba2838
266 266
267 267
268 268 One with >200 heads, which used to use up all of the sample:
269 269
270 270 $ hg init manyheads
271 271 $ cd manyheads
272 272 $ echo "+300:r @a" >dagdesc
273 273 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
274 274 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
275 275 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
276 276 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
277 277 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
278 278 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
279 279 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
280 280 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
281 281 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
282 282 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
283 283 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
284 284 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
285 285 $ echo "*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3 *r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3*r+3" >>dagdesc # 20 heads
286 286 $ echo "@b *r+3" >>dagdesc # one more head
287 287 $ hg debugbuilddag <dagdesc
288 288 reading DAG from stdin
289 289
290 290 $ hg heads -t --template . | wc -c
291 291 \s*261 (re)
292 292
293 293 $ hg clone -b a . a
294 294 adding changesets
295 295 adding manifests
296 296 adding file changes
297 297 added 1340 changesets with 0 changes to 0 files (+259 heads)
298 298 updating to branch a
299 299 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
300 300 $ hg clone -b b . b
301 301 adding changesets
302 302 adding manifests
303 303 adding file changes
304 304 added 304 changesets with 0 changes to 0 files
305 305 updating to branch b
306 306 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
307 307
308 308 $ hg -R a debugdiscovery b --debug --verbose
309 309 comparing with b
310 310 query 1; heads
311 311 searching for changes
312 312 taking quick initial sample
313 313 searching: 2 queries
314 314 query 2; still undecided: 1240, sample size is: 100
315 315 sampling from both directions
316 316 searching: 3 queries
317 317 query 3; still undecided: 1140, sample size is: 200
318 318 sampling from both directions
319 319 searching: 4 queries
320 query 4; still undecided: 940, sample size is: 200
320 query 4; still undecided: 592, sample size is: 200
321 321 sampling from both directions
322 322 searching: 5 queries
323 query 5; still undecided: 740, sample size is: 200
323 query 5; still undecided: 292, sample size is: 200
324 324 sampling from both directions
325 325 searching: 6 queries
326 query 6; still undecided: 540, sample size is: 200
327 sampling from both directions
328 searching: 7 queries
329 query 7; still undecided: 46, sample size is: 46
330 7 total queries
326 query 6; still undecided: 51, sample size is: 51
327 6 total queries
331 328 common heads: 3ee37d65064a
332 329
333 330 Test actual protocol when pulling one new head in addition to common heads
334 331
335 332 $ hg clone -U b c
336 333 $ hg -R c id -ir tip
337 334 513314ca8b3a
338 335 $ hg -R c up -qr default
339 336 $ touch c/f
340 337 $ hg -R c ci -Aqm "extra head"
341 338 $ hg -R c id -i
342 339 e64a39e7da8b
343 340
344 341 $ hg serve -R c -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
345 342 $ cat hg.pid >> $DAEMON_PIDS
346 343
347 344 $ hg -R b incoming http://localhost:$HGPORT/ -T '{node|short}\n'
348 345 comparing with http://localhost:$HGPORT/
349 346 searching for changes
350 347 e64a39e7da8b
351 348
352 349 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
353 350 $ cut -d' ' -f6- access.log | grep -v cmd=known # cmd=known uses random sampling
354 351 "GET /?cmd=capabilities HTTP/1.1" 200 -
355 352 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D513314ca8b3ae4dac8eec56966265b00fcf866db
356 353 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=513314ca8b3ae4dac8eec56966265b00fcf866db&heads=e64a39e7da8b0d54bc63e81169aff001c13b3477
357 354 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
358 355 $ cat errors.log
359 356
360 357 $ cd ..
361 358
362 359
363 360 Issue 4438 - test coverage for the issues introduced by 3ef893520a85.
364 361
365 362 $ mkdir issue4438
366 363 $ cd issue4438
367 364 #if false
368 365 generate new bundles:
369 366 $ hg init r1
370 367 $ for i in `seq 101`; do hg -R r1 up -qr null && hg -R r1 branch -q b$i && hg -R r1 ci -qmb$i; done
371 368 $ hg clone -q r1 r2
372 369 $ for i in `seq 10`; do hg -R r1 up -qr null && hg -R r1 branch -q c$i && hg -R r1 ci -qmc$i; done
373 370 $ hg -R r2 branch -q r2change && hg -R r2 ci -qmr2change
374 371 $ hg -R r1 bundle -qa $TESTDIR/bundles/issue4438-r1.hg
375 372 $ hg -R r2 bundle -qa $TESTDIR/bundles/issue4438-r2.hg
376 373 #else
377 374 use existing bundles:
378 375 $ hg clone -q $TESTDIR/bundles/issue4438-r1.hg r1
379 376 $ hg clone -q $TESTDIR/bundles/issue4438-r2.hg r2
380 377 #endif
381 378
382 379 Set iteration order could cause wrong and unstable results - fixed in 73cfaa348650:
383 380
384 381 $ hg -R r1 outgoing r2 -T'{rev} '
385 382 comparing with r2
386 383 searching for changes
387 384 101 102 103 104 105 106 107 108 109 110 (no-eol)
388 385
389 386 The case where all the 'initialsamplesize' samples already were common would
390 387 give 'all local heads known remotely' without checking the remaining heads -
391 388 fixed in 86c35b7ae300:
392 389
393 390 $ cat >> $TESTTMP/unrandomsample.py << EOF
394 391 > import random
395 392 > def sample(population, k):
396 393 > return sorted(population)[:k]
397 394 > random.sample = sample
398 395 > EOF
399 396
400 397 $ cat >> r1/.hg/hgrc << EOF
401 398 > [extensions]
402 399 > unrandomsample = $TESTTMP/unrandomsample.py
403 400 > EOF
404 401
405 402 $ hg -R r1 outgoing r2 -T'{rev} '
406 403 comparing with r2
407 404 searching for changes
408 405 101 102 103 104 105 106 107 108 109 110 (no-eol)
409 406 $ cd ..
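The `unrandomsample.py` extension above keeps the last run deterministic by monkeypatching `random.sample` process-wide, so `_limitsample` always keeps the smallest candidates instead of a random subset. A standalone sketch of the same trick (not part of the test suite):

    import random

    def unrandom_sample(population, k):
        # deterministic stand-in: the k smallest elements, in sorted order
        return sorted(population)[:k]

    random.sample = unrandom_sample
    assert random.sample(set([30, 10, 50, 20, 40]), 3) == [10, 20, 30]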