@@ -1,918 +1,920 b''
|
1 | 1 | # streamclone.py - producing and consuming streaming repository data |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import contextlib |
|
11 | 11 | import errno |
|
12 | 12 | import os |
|
13 | 13 | import struct |
|
14 | 14 | |
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .pycompat import open |
|
17 | 17 | from .interfaces import repository |
|
18 | 18 | from . import ( |
|
19 | 19 | bookmarks, |
|
20 | 20 | cacheutil, |
|
21 | 21 | error, |
|
22 | 22 | narrowspec, |
|
23 | 23 | phases, |
|
24 | 24 | pycompat, |
|
25 | 25 | requirements as requirementsmod, |
|
26 | 26 | scmutil, |
|
27 | 27 | store, |
|
28 | 28 | util, |
|
29 | 29 | ) |
|
30 | 30 | from .utils import ( |
|
31 | 31 | stringutil, |
|
32 | 32 | ) |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | def canperformstreamclone(pullop, bundle2=False): |
|
36 | 36 | """Whether it is possible to perform a streaming clone as part of pull. |
|
37 | 37 | |
|
38 | 38 | ``bundle2`` will cause the function to consider stream clone through |
|
39 | 39 | bundle2 and only through bundle2. |
|
40 | 40 | |
|
41 | 41 | Returns a tuple of (supported, requirements). ``supported`` is True if |
|
42 | 42 | streaming clone is supported and False otherwise. ``requirements`` is |
|
43 | 43 | a set of repo requirements from the remote, or ``None`` if stream clone |
|
44 | 44 | isn't supported. |
|
45 | 45 | """ |
|
46 | 46 | repo = pullop.repo |
|
47 | 47 | remote = pullop.remote |
|
48 | 48 | |
|
49 | 49 | bundle2supported = False |
|
50 | 50 | if pullop.canusebundle2: |
|
51 | 51 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): |
|
52 | 52 | bundle2supported = True |
|
53 | 53 | # else |
|
54 | 54 | # Server doesn't support bundle2 stream clone or doesn't support |
|
55 | 55 | # the versions we support. Fall back and possibly allow legacy. |
|
56 | 56 | |
|
57 | 57 | # Ensures legacy code path uses available bundle2. |
|
58 | 58 | if bundle2supported and not bundle2: |
|
59 | 59 | return False, None |
|
60 | 60 | # Ensures bundle2 doesn't try to do a stream clone if it isn't supported. |
|
61 | 61 | elif bundle2 and not bundle2supported: |
|
62 | 62 | return False, None |
|
63 | 63 | |
|
64 | 64 | # Streaming clone only works on empty repositories. |
|
65 | 65 | if len(repo): |
|
66 | 66 | return False, None |
|
67 | 67 | |
|
68 | 68 | # Streaming clone only works if all data is being requested. |
|
69 | 69 | if pullop.heads: |
|
70 | 70 | return False, None |
|
71 | 71 | |
|
72 | 72 | streamrequested = pullop.streamclonerequested |
|
73 | 73 | |
|
74 | 74 | # If we don't have a preference, let the server decide for us. This |
|
75 | 75 | # likely only comes into play in LANs. |
|
76 | 76 | if streamrequested is None: |
|
77 | 77 | # The server can advertise whether to prefer streaming clone. |
|
78 | 78 | streamrequested = remote.capable(b'stream-preferred') |
|
79 | 79 | |
|
80 | 80 | if not streamrequested: |
|
81 | 81 | return False, None |
|
82 | 82 | |
|
83 | 83 | # In order for stream clone to work, the client has to support all the |
|
84 | 84 | # requirements advertised by the server. |
|
85 | 85 | # |
|
86 | 86 | # The server advertises its requirements via the "stream" and "streamreqs" |
|
87 | 87 | # capability. "stream" (a value-less capability) is advertised if and only |
|
88 | 88 | # if the only requirement is "revlogv1." Else, the "streamreqs" capability |
|
89 | 89 | # is advertised and contains a comma-delimited list of requirements. |
|
90 | 90 | requirements = set() |
|
91 | 91 | if remote.capable(b'stream'): |
|
92 | 92 | requirements.add(requirementsmod.REVLOGV1_REQUIREMENT) |
|
93 | 93 | else: |
|
94 | 94 | streamreqs = remote.capable(b'streamreqs') |
|
95 | 95 | # This is weird and shouldn't happen with modern servers. |
|
96 | 96 | if not streamreqs: |
|
97 | 97 | pullop.repo.ui.warn( |
|
98 | 98 | _( |
|
99 | 99 | b'warning: stream clone requested but server has them ' |
|
100 | 100 | b'disabled\n' |
|
101 | 101 | ) |
|
102 | 102 | ) |
|
103 | 103 | return False, None |
|
104 | 104 | |
|
105 | 105 | streamreqs = set(streamreqs.split(b',')) |
|
106 | 106 | # Server requires something we don't support. Bail. |
|
107 | 107 | missingreqs = streamreqs - repo.supportedformats |
|
108 | 108 | if missingreqs: |
|
109 | 109 | pullop.repo.ui.warn( |
|
110 | 110 | _( |
|
111 | 111 | b'warning: stream clone requested but client is missing ' |
|
112 | 112 | b'requirements: %s\n' |
|
113 | 113 | ) |
|
114 | 114 | % b', '.join(sorted(missingreqs)) |
|
115 | 115 | ) |
|
116 | 116 | pullop.repo.ui.warn( |
|
117 | 117 | _( |
|
118 | 118 | b'(see https://www.mercurial-scm.org/wiki/MissingRequirement ' |
|
119 | 119 | b'for more information)\n' |
|
120 | 120 | ) |
|
121 | 121 | ) |
|
122 | 122 | return False, None |
|
123 | 123 | requirements = streamreqs |
|
124 | 124 | |
|
125 | 125 | return True, requirements |
|
126 | 126 | |
|
127 | 127 | |
|
128 | 128 | def maybeperformlegacystreamclone(pullop): |
|
129 | 129 | """Possibly perform a legacy stream clone operation. |
|
130 | 130 | |
|
131 | 131 | Legacy stream clones are performed as part of pull but before all other |
|
132 | 132 | operations. |
|
133 | 133 | |
|
134 | 134 | A legacy stream clone will not be performed if a bundle2 stream clone is |
|
135 | 135 | supported. |
|
136 | 136 | """ |
|
137 | 137 | from . import localrepo |
|
138 | 138 | |
|
139 | 139 | supported, requirements = canperformstreamclone(pullop) |
|
140 | 140 | |
|
141 | 141 | if not supported: |
|
142 | 142 | return |
|
143 | 143 | |
|
144 | 144 | repo = pullop.repo |
|
145 | 145 | remote = pullop.remote |
|
146 | 146 | |
|
147 | 147 | # Save remote branchmap. We will use it later to speed up branchcache |
|
148 | 148 | # creation. |
|
149 | 149 | rbranchmap = None |
|
150 | 150 | if remote.capable(b'branchmap'): |
|
151 | 151 | with remote.commandexecutor() as e: |
|
152 | 152 | rbranchmap = e.callcommand(b'branchmap', {}).result() |
|
153 | 153 | |
|
154 | 154 | repo.ui.status(_(b'streaming all changes\n')) |
|
155 | 155 | |
|
156 | 156 | with remote.commandexecutor() as e: |
|
157 | 157 | fp = e.callcommand(b'stream_out', {}).result() |
|
158 | 158 | |
|
159 | 159 | # TODO strictly speaking, this code should all be inside the context |
|
160 | 160 | # manager because the context manager is supposed to ensure all wire state |
|
161 | 161 | # is flushed when exiting. But the legacy peers don't do this, so it |
|
162 | 162 | # doesn't matter. |
|
163 | 163 | l = fp.readline() |
|
164 | 164 | try: |
|
165 | 165 | resp = int(l) |
|
166 | 166 | except ValueError: |
|
167 | 167 | raise error.ResponseError( |
|
168 | 168 | _(b'unexpected response from remote server:'), l |
|
169 | 169 | ) |
|
170 | 170 | if resp == 1: |
|
171 | 171 | raise error.Abort(_(b'operation forbidden by server')) |
|
172 | 172 | elif resp == 2: |
|
173 | 173 | raise error.Abort(_(b'locking the remote repository failed')) |
|
174 | 174 | elif resp != 0: |
|
175 | 175 | raise error.Abort(_(b'the server sent an unknown error code')) |
|
176 | 176 | |
|
177 | 177 | l = fp.readline() |
|
178 | 178 | try: |
|
179 | 179 | filecount, bytecount = map(int, l.split(b' ', 1)) |
|
180 | 180 | except (ValueError, TypeError): |
|
181 | 181 | raise error.ResponseError( |
|
182 | 182 | _(b'unexpected response from remote server:'), l |
|
183 | 183 | ) |
|
184 | 184 | |
|
185 | 185 | with repo.lock(): |
|
186 | 186 | consumev1(repo, fp, filecount, bytecount) |
|
187 | 187 | |
|
188 | 188 | # new requirements = old non-format requirements + |
|
189 | 189 | # new format-related remote requirements |
|
190 | 190 | # requirements from the streamed-in repository |
|
191 | 191 | repo.requirements = requirements | ( |
|
192 | 192 | repo.requirements - repo.supportedformats |
|
193 | 193 | ) |
|
194 | 194 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
195 | 195 | repo.ui, repo.requirements, repo.features |
|
196 | 196 | ) |
|
197 | 197 | scmutil.writereporequirements(repo) |
|
198 | 198 | |
|
199 | 199 | if rbranchmap: |
|
200 | 200 | repo._branchcaches.replace(repo, rbranchmap) |
|
201 | 201 | |
|
202 | 202 | repo.invalidate() |
|
203 | 203 | |
|
204 | 204 | |
|
205 | 205 | def allowservergeneration(repo): |
|
206 | 206 | """Whether streaming clones are allowed from the server.""" |
|
207 | 207 | if repository.REPO_FEATURE_STREAM_CLONE not in repo.features: |
|
208 | 208 | return False |
|
209 | 209 | |
|
210 | 210 | if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True): |
|
211 | 211 | return False |
|
212 | 212 | |
|
213 | 213 | # The way stream clone works makes it impossible to hide secret changesets. |
|
214 | 214 | # So don't allow this by default. |
|
215 | 215 | secret = phases.hassecret(repo) |
|
216 | 216 | if secret: |
|
217 | 217 | return repo.ui.configbool(b'server', b'uncompressedallowsecret') |
|
218 | 218 | |
|
219 | 219 | return True |
|
220 | 220 | |
|
221 | 221 | |
|
222 | 222 | # This is its own function so extensions can override it. |
|
223 | 223 | def _walkstreamfiles(repo, matcher=None): |
|
224 | 224 | return repo.store.walk(matcher) |
|
225 | 225 | |
|
226 | 226 | |
|
227 | 227 | def generatev1(repo): |
|
228 | 228 | """Emit content for version 1 of a streaming clone. |
|
229 | 229 | |
|
230 | 230 | This returns a 3-tuple of (file count, byte size, data iterator). |
|
231 | 231 | |
|
232 | 232 | The data iterator consists of N entries for each file being transferred. |
|
233 | 233 | Each file entry starts as a line with the file name and integer size |
|
234 | 234 | delimited by a null byte. |
|
235 | 235 | |
|
236 | 236 | The raw file data follows. Following the raw file data is the next file |
|
237 | 237 | entry, or EOF. |
|
238 | 238 | |
|
239 | 239 | When used on the wire protocol, an additional line indicating protocol |
|
240 | 240 | success will be prepended to the stream. This function is not responsible |
|
241 | 241 | for adding it. |
|
242 | 242 | |
|
243 | 243 | This function will obtain a repository lock to ensure a consistent view of |
|
244 | 244 | the store is captured. It therefore may raise LockError. |
|
245 | 245 | """ |
|
246 | 246 | entries = [] |
|
247 | 247 | total_bytes = 0 |
|
248 | 248 | # Get consistent snapshot of repo, lock during scan. |
|
249 | 249 | with repo.lock(): |
|
250 | 250 | repo.ui.debug(b'scanning\n') |
|
251 | 251 | for file_type, name, ename, size in _walkstreamfiles(repo): |
|
252 | 252 | if size: |
|
253 | 253 | entries.append((name, size)) |
|
254 | 254 | total_bytes += size |
|
255 | 255 | _test_sync_point_walk_1(repo) |
|
256 | 256 | _test_sync_point_walk_2(repo) |
|
257 | 257 | |
|
258 | 258 | repo.ui.debug( |
|
259 | 259 | b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes) |
|
260 | 260 | ) |
|
261 | 261 | |
|
262 | 262 | svfs = repo.svfs |
|
263 | 263 | debugflag = repo.ui.debugflag |
|
264 | 264 | |
|
265 | 265 | def emitrevlogdata(): |
|
266 | 266 | for name, size in entries: |
|
267 | 267 | if debugflag: |
|
268 | 268 | repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size)) |
|
269 | 269 | # partially encode name over the wire for backwards compat |
|
270 | 270 | yield b'%s\0%d\n' % (store.encodedir(name), size) |
|
271 | 271 | # auditing at this stage is both pointless (paths are already |
|
272 | 272 | # trusted by the local repo) and expensive |
|
273 | 273 | with svfs(name, b'rb', auditpath=False) as fp: |
|
274 | 274 | if size <= 65536: |
|
275 | 275 | yield fp.read(size) |
|
276 | 276 | else: |
|
277 | 277 | for chunk in util.filechunkiter(fp, limit=size): |
|
278 | 278 | yield chunk |
|
279 | 279 | |
|
280 | 280 | return len(entries), total_bytes, emitrevlogdata() |
|
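The per-file framing emitted above is simple enough to consume by hand: each entry is a header line of the form "<name>\0<size>\n" followed by exactly <size> raw bytes. A minimal reader sketch, assuming only that ``fp`` is a binary file object positioned at the first entry (this helper is illustrative, not part of the module):

    def iter_v1_entries(fp, filecount):
        """Yield (name, data) pairs from a v1 stream (sketch, no validation)."""
        for _ in range(filecount):
            header = fp.readline()          # b"<name>\0<size>\n"
            name, size = header.rstrip(b'\n').split(b'\0', 1)
            yield name, fp.read(int(size))  # exactly <size> bytes follow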
281 | 281 | |
|
282 | 282 | |
|
283 | 283 | def generatev1wireproto(repo): |
|
284 | 284 | """Emit content for version 1 of streaming clone suitable for the wire. |
|
285 | 285 | |
|
286 | 286 | This is the data output from ``generatev1()`` with 2 header lines. The |
|
287 | 287 | first line indicates overall success. The 2nd contains the file count and |
|
288 | 288 | byte size of payload. |
|
289 | 289 | |
|
290 | 290 | The success line contains "0" for success, "1" for stream generation not |
|
291 | 291 | allowed, and "2" for error locking the repository (possibly indicating |
|
292 | 292 | a permissions error for the server process). |
|
293 | 293 | """ |
|
294 | 294 | if not allowservergeneration(repo): |
|
295 | 295 | yield b'1\n' |
|
296 | 296 | return |
|
297 | 297 | |
|
298 | 298 | try: |
|
299 | 299 | filecount, bytecount, it = generatev1(repo) |
|
300 | 300 | except error.LockError: |
|
301 | 301 | yield b'2\n' |
|
302 | 302 | return |
|
303 | 303 | |
|
304 | 304 | # Indicates successful response. |
|
305 | 305 | yield b'0\n' |
|
306 | 306 | yield b'%d %d\n' % (filecount, bytecount) |
|
307 | 307 | for chunk in it: |
|
308 | 308 | yield chunk |
|
309 | 309 | |
|
310 | 310 | |
|
311 | 311 | def generatebundlev1(repo, compression=b'UN'): |
|
312 | 312 | """Emit content for version 1 of a stream clone bundle. |
|
313 | 313 | |
|
314 | 314 | The first 4 bytes of the output ("HGS1") denote this as stream clone |
|
315 | 315 | bundle version 1. |
|
316 | 316 | |
|
317 | 317 | The next 2 bytes indicate the compression type. Only "UN" is currently |
|
318 | 318 | supported. |
|
319 | 319 | |
|
320 | 320 | The next 16 bytes are two 64-bit big endian unsigned integers indicating |
|
321 | 321 | file count and byte count, respectively. |
|
322 | 322 | |
|
323 | 323 | The next 2 bytes is a 16-bit big endian unsigned short declaring the length |
|
324 | 324 | of the requirements string, including a trailing \0. The following N bytes |
|
325 | 325 | are the requirements string, which is ASCII containing a comma-delimited |
|
326 | 326 | list of repo requirements that are needed to support the data. |
|
327 | 327 | |
|
328 | 328 | The remaining content is the output of ``generatev1()`` (which may be |
|
329 | 329 | compressed in the future). |
|
330 | 330 | |
|
331 | 331 | Returns a tuple of (requirements, data generator). |
|
332 | 332 | """ |
|
333 | 333 | if compression != b'UN': |
|
334 | 334 | raise ValueError(b'we do not support the compression argument yet') |
|
335 | 335 | |
|
336 | 336 | requirements = repo.requirements & repo.supportedformats |
|
337 | 337 | requires = b','.join(sorted(requirements)) |
|
338 | 338 | |
|
339 | 339 | def gen(): |
|
340 | 340 | yield b'HGS1' |
|
341 | 341 | yield compression |
|
342 | 342 | |
|
343 | 343 | filecount, bytecount, it = generatev1(repo) |
|
344 | 344 | repo.ui.status( |
|
345 | 345 | _(b'writing %d bytes for %d files\n') % (bytecount, filecount) |
|
346 | 346 | ) |
|
347 | 347 | |
|
348 | 348 | yield struct.pack(b'>QQ', filecount, bytecount) |
|
349 | 349 | yield struct.pack(b'>H', len(requires) + 1) |
|
350 | 350 | yield requires + b'\0' |
|
351 | 351 | |
|
352 | 352 | # This is where we'll add compression in the future. |
|
353 | 353 | assert compression == b'UN' |
|
354 | 354 | |
|
355 | 355 | progress = repo.ui.makeprogress( |
|
356 | 356 | _(b'bundle'), total=bytecount, unit=_(b'bytes') |
|
357 | 357 | ) |
|
358 | 358 | progress.update(0) |
|
359 | 359 | |
|
360 | 360 | for chunk in it: |
|
361 | 361 | progress.increment(step=len(chunk)) |
|
362 | 362 | yield chunk |
|
363 | 363 | |
|
364 | 364 | progress.complete() |
|
365 | 365 | |
|
366 | 366 | return requirements, gen() |
|
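The fixed layout described in the docstring above (magic, compression type, two big-endian 64-bit counts, a 16-bit requirements length, then the NUL-terminated requirements string) round-trips with ``struct`` alone. A self-contained sketch with made-up counts; parsing it back mirrors ``readbundle1header()`` further down:

    import struct

    filecount, bytecount, requires = 3, 4096, b'generaldelta,revlogv1'
    header = (
        b'HGS1' + b'UN'                              # magic + compression type
        + struct.pack(b'>QQ', filecount, bytecount)  # 64-bit BE counts
        + struct.pack(b'>H', len(requires) + 1)      # length incl. trailing \0
        + requires + b'\0'
    )
    assert header[:4] == b'HGS1' and header[4:6] == b'UN'
    fc, bc = struct.unpack(b'>QQ', header[6:22])
    (rlen,) = struct.unpack(b'>H', header[22:24])
    reqs = set(header[24:24 + rlen].rstrip(b'\0').split(b','))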
367 | 367 | |
|
368 | 368 | |
|
369 | 369 | def consumev1(repo, fp, filecount, bytecount): |
|
370 | 370 | """Apply the contents from version 1 of a streaming clone file handle. |
|
371 | 371 | |
|
372 | 372 | This takes the output from "stream_out" and applies it to the specified |
|
373 | 373 | repository. |
|
374 | 374 | |
|
375 | 375 | Like "stream_out," the status line added by the wire protocol is not |
|
376 | 376 | handled by this function. |
|
377 | 377 | """ |
|
378 | 378 | with repo.lock(): |
|
379 | 379 | repo.ui.status( |
|
380 | 380 | _(b'%d files to transfer, %s of data\n') |
|
381 | 381 | % (filecount, util.bytecount(bytecount)) |
|
382 | 382 | ) |
|
383 | 383 | progress = repo.ui.makeprogress( |
|
384 | 384 | _(b'clone'), total=bytecount, unit=_(b'bytes') |
|
385 | 385 | ) |
|
386 | 386 | progress.update(0) |
|
387 | 387 | start = util.timer() |
|
388 | 388 | |
|
389 | 389 | # TODO: get rid of (potential) inconsistency |
|
390 | 390 | # |
|
391 | 391 | # If transaction is started and any @filecache property is |
|
392 | 392 | # changed at this point, it causes inconsistency between |
|
393 | 393 | # in-memory cached property and streamclone-ed file on the |
|
394 | 394 | # disk. Nested transaction prevents transaction scope "clone" |
|
395 | 395 | # below from writing in-memory changes out at the end of it, |
|
396 | 396 | # even though in-memory changes are discarded at the end of it |
|
397 | 397 | # regardless of transaction nesting. |
|
398 | 398 | # |
|
399 | 399 | # But transaction nesting can't be simply prohibited, because |
|
400 | 400 | # nesting occurs also in ordinary case (e.g. enabling |
|
401 | 401 | # clonebundles). |
|
402 | 402 | |
|
403 | 403 | with repo.transaction(b'clone'): |
|
404 | 404 | with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount): |
|
405 | 405 | for i in pycompat.xrange(filecount): |
|
406 | 406 | # XXX doesn't support '\n' or '\r' in filenames |
|
407 | 407 | l = fp.readline() |
|
408 | 408 | try: |
|
409 | 409 | name, size = l.split(b'\0', 1) |
|
410 | 410 | size = int(size) |
|
411 | 411 | except (ValueError, TypeError): |
|
412 | 412 | raise error.ResponseError( |
|
413 | 413 | _(b'unexpected response from remote server:'), l |
|
414 | 414 | ) |
|
415 | 415 | if repo.ui.debugflag: |
|
416 | 416 | repo.ui.debug( |
|
417 | 417 | b'adding %s (%s)\n' % (name, util.bytecount(size)) |
|
418 | 418 | ) |
|
419 | 419 | # for backwards compat, name was partially encoded |
|
420 | 420 | path = store.decodedir(name) |
|
421 | 421 | with repo.svfs(path, b'w', backgroundclose=True) as ofp: |
|
422 | 422 | for chunk in util.filechunkiter(fp, limit=size): |
|
423 | 423 | progress.increment(step=len(chunk)) |
|
424 | 424 | ofp.write(chunk) |
|
425 | 425 | |
|
426 | 426 | # force @filecache properties to be reloaded from |
|
427 | 427 | # streamclone-ed file at next access |
|
428 | 428 | repo.invalidate(clearfilecache=True) |
|
429 | 429 | |
|
430 | 430 | elapsed = util.timer() - start |
|
431 | 431 | if elapsed <= 0: |
|
432 | 432 | elapsed = 0.001 |
|
433 | 433 | progress.complete() |
|
434 | 434 | repo.ui.status( |
|
435 | 435 | _(b'transferred %s in %.1f seconds (%s/sec)\n') |
|
436 | 436 | % ( |
|
437 | 437 | util.bytecount(bytecount), |
|
438 | 438 | elapsed, |
|
439 | 439 | util.bytecount(bytecount / elapsed), |
|
440 | 440 | ) |
|
441 | 441 | ) |
|
442 | 442 | |
|
443 | 443 | |
|
444 | 444 | def readbundle1header(fp): |
|
445 | 445 | compression = fp.read(2) |
|
446 | 446 | if compression != b'UN': |
|
447 | 447 | raise error.Abort( |
|
448 | 448 | _( |
|
449 | 449 | b'only uncompressed stream clone bundles are ' |
|
450 | 450 | b'supported; got %s' |
|
451 | 451 | ) |
|
452 | 452 | % compression |
|
453 | 453 | ) |
|
454 | 454 | |
|
455 | 455 | filecount, bytecount = struct.unpack(b'>QQ', fp.read(16)) |
|
456 | 456 | requireslen = struct.unpack(b'>H', fp.read(2))[0] |
|
457 | 457 | requires = fp.read(requireslen) |
|
458 | 458 | |
|
459 | 459 | if not requires.endswith(b'\0'): |
|
460 | 460 | raise error.Abort( |
|
461 | 461 | _( |
|
462 | 462 | b'malformed stream clone bundle: ' |
|
463 | 463 | b'requirements not properly encoded' |
|
464 | 464 | ) |
|
465 | 465 | ) |
|
466 | 466 | |
|
467 | 467 | requirements = set(requires.rstrip(b'\0').split(b',')) |
|
468 | 468 | |
|
469 | 469 | return filecount, bytecount, requirements |
|
470 | 470 | |
|
471 | 471 | |
|
472 | 472 | def applybundlev1(repo, fp): |
|
473 | 473 | """Apply the content from a stream clone bundle version 1. |
|
474 | 474 | |
|
475 | 475 | We assume the 4 byte header has been read and validated and the file handle |
|
476 | 476 | is at the 2 byte compression identifier. |
|
477 | 477 | """ |
|
478 | 478 | if len(repo): |
|
479 | 479 | raise error.Abort( |
|
480 | 480 | _(b'cannot apply stream clone bundle on non-empty repo') |
|
481 | 481 | ) |
|
482 | 482 | |
|
483 | 483 | filecount, bytecount, requirements = readbundle1header(fp) |
|
484 | 484 | missingreqs = requirements - repo.supportedformats |
|
485 | 485 | if missingreqs: |
|
486 | 486 | raise error.Abort( |
|
487 | 487 | _(b'unable to apply stream clone: unsupported format: %s') |
|
488 | 488 | % b', '.join(sorted(missingreqs)) |
|
489 | 489 | ) |
|
490 | 490 | |
|
491 | 491 | consumev1(repo, fp, filecount, bytecount) |
|
492 | 492 | |
|
493 | 493 | |
|
494 | 494 | class streamcloneapplier(object): |
|
495 | 495 | """Class to manage applying streaming clone bundles. |
|
496 | 496 | |
|
497 | 497 | We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle |
|
498 | 498 | readers to perform bundle type-specific functionality. |
|
499 | 499 | """ |
|
500 | 500 | |
|
501 | 501 | def __init__(self, fh): |
|
502 | 502 | self._fh = fh |
|
503 | 503 | |
|
504 | 504 | def apply(self, repo): |
|
505 | 505 | return applybundlev1(repo, self._fh) |
|
506 | 506 | |
|
507 | 507 | |
|
508 | 508 | # type of file to stream |
|
509 | 509 | _fileappend = 0 # append only file |
|
510 | 510 | _filefull = 1 # full snapshot file |
|
511 | 511 | |
|
512 | 512 | # Source of the file |
|
513 | 513 | _srcstore = b's' # store (svfs) |
|
514 | 514 | _srccache = b'c' # cache (cache) |
|
515 | 515 | |
|
516 | 516 | # This is its own function so extensions can override it. |
|
517 | 517 | def _walkstreamfullstorefiles(repo): |
|
518 | 518 | """list snapshot files from the store""" |
|
519 | 519 | fnames = [] |
|
520 | 520 | if not repo.publishing(): |
|
521 | 521 | fnames.append(b'phaseroots') |
|
522 | 522 | return fnames |
|
523 | 523 | |
|
524 | 524 | |
|
525 | 525 | def _filterfull(entry, copy, vfsmap): |
|
526 | 526 | """actually copy the snapshot files""" |
|
527 | 527 | src, name, ftype, data = entry |
|
528 | 528 | if ftype != _filefull: |
|
529 | 529 | return entry |
|
530 | 530 | return (src, name, ftype, copy(vfsmap[src].join(name))) |
|
531 | 531 | |
|
532 | 532 | |
|
533 | 533 | @contextlib.contextmanager |
|
534 | 534 | def maketempcopies(): |
|
535 | 535 | """return a function to temporarily copy files""" |
|
536 | 536 | files = [] |
|
537 | 537 | try: |
|
538 | 538 | |
|
539 | 539 | def copy(src): |
|
540 | 540 | fd, dst = pycompat.mkstemp() |
|
541 | 541 | os.close(fd) |
|
542 | 542 | files.append(dst) |
|
543 | 543 | util.copyfiles(src, dst, hardlink=True) |
|
544 | 544 | return dst |
|
545 | 545 | |
|
546 | 546 | yield copy |
|
547 | 547 | finally: |
|
548 | 548 | for tmp in files: |
|
549 | 549 | util.tryunlink(tmp) |
|
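Typical use of this helper, as ``_filterfull()`` above does for snapshot files: each copy is hardlinked when the filesystem allows it, and every copy is removed once the context exits. A short sketch with a hypothetical path:

    with maketempcopies() as copy:
        tmp = copy(b'/repo/.hg/store/phaseroots')  # stable snapshot of the file
        # ... stream tmp out at leisure, even if the source keeps changing ...
    # all temporary copies have been unlinked here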
550 | 550 | |
|
551 | 551 | |
|
552 | 552 | def _makemap(repo): |
|
553 | 553 | """make a (src -> vfs) map for the repo""" |
|
554 | 554 | vfsmap = { |
|
555 | 555 | _srcstore: repo.svfs, |
|
556 | 556 | _srccache: repo.cachevfs, |
|
557 | 557 | } |
|
558 | 558 | # we keep repo.vfs out of the map on purpose, there are too many dangers there |
|
559 | 559 | # (eg: .hg/hgrc) |
|
560 | 560 | assert repo.vfs not in vfsmap.values() |
|
561 | 561 | |
|
562 | 562 | return vfsmap |
|
563 | 563 | |
|
564 | 564 | |
|
565 | 565 | def _emit2(repo, entries, totalfilesize): |
|
566 | 566 | """actually emit the stream bundle""" |
|
567 | 567 | vfsmap = _makemap(repo) |
|
568 | 568 | # we keep repo.vfs out of the map on purpose, there are too many dangers there |
|
569 | 569 | # (eg: .hg/hgrc), |
|
570 | 570 | # |
|
571 | 571 | # this assert is duplicated (from _makemap) as an author might think this is |
|
572 | 572 | # fine, while this is really not fine. |
|
573 | 573 | if repo.vfs in vfsmap.values(): |
|
574 | 574 | raise error.ProgrammingError( |
|
575 | 575 | b'repo.vfs must not be added to vfsmap for security reasons' |
|
576 | 576 | ) |
|
577 | 577 | |
|
578 | 578 | progress = repo.ui.makeprogress( |
|
579 | 579 | _(b'bundle'), total=totalfilesize, unit=_(b'bytes') |
|
580 | 580 | ) |
|
581 | 581 | progress.update(0) |
|
582 | 582 | with maketempcopies() as copy, progress: |
|
583 | 583 | # copy is delayed until we are in the try |
|
584 | 584 | entries = [_filterfull(e, copy, vfsmap) for e in entries] |
|
585 | 585 | yield None # this releases the lock on the repository |
|
586 | 586 | totalbytecount = 0 |
|
587 | 587 | |
|
588 | 588 | for src, name, ftype, data in entries: |
|
589 | 589 | vfs = vfsmap[src] |
|
590 | 590 | yield src |
|
591 | 591 | yield util.uvarintencode(len(name)) |
|
592 | 592 | if ftype == _fileappend: |
|
593 | 593 | fp = vfs(name) |
|
594 | 594 | size = data |
|
595 | 595 | elif ftype == _filefull: |
|
596 | 596 | fp = open(data, b'rb') |
|
597 | 597 | size = util.fstat(fp).st_size |
|
598 | 598 | bytecount = 0 |
|
599 | 599 | try: |
|
600 | 600 | yield util.uvarintencode(size) |
|
601 | 601 | yield name |
|
602 | 602 | if size <= 65536: |
|
603 | 603 | chunks = (fp.read(size),) |
|
604 | 604 | else: |
|
605 | 605 | chunks = util.filechunkiter(fp, limit=size) |
|
606 | 606 | for chunk in chunks: |
|
607 | 607 | bytecount += len(chunk) |
|
608 | 608 | totalbytecount += len(chunk) |
|
609 | 609 | progress.update(totalbytecount) |
|
610 | 610 | yield chunk |
|
611 | 611 | if bytecount != size: |
|
612 | 612 | # Would most likely be caused by a race due to `hg strip` or |
|
613 | 613 | # a revlog split |
|
614 | 614 | raise error.Abort( |
|
615 | 615 | _( |
|
616 | 616 | b'clone could only read %d bytes from %s, but ' |
|
617 | 617 | b'expected %d bytes' |
|
618 | 618 | ) |
|
619 | 619 | % (bytecount, name, size) |
|
620 | 620 | ) |
|
621 | 621 | finally: |
|
622 | 622 | fp.close() |
|
623 | 623 | |
|
624 | 624 | |
|
625 | 625 | def _test_sync_point_walk_1(repo): |
|
626 | 626 | """a function for synchronisation during tests""" |
|
627 | 627 | |
|
628 | 628 | |
|
629 | 629 | def _test_sync_point_walk_2(repo): |
|
630 | 630 | """a function for synchronisation during tests""" |
|
631 | 631 | |
|
632 | 632 | |
|
633 | 633 | def _v2_walk(repo, includes, excludes, includeobsmarkers): |
|
634 | 634 | """emit a series of file information entries useful to clone a repo |
|
635 | 635 | |
|
636 | 636 | return (entries, totalfilesize) |
|
637 | 637 | |
|
638 | 638 | entries is a list of tuples (vfs-key, file-path, file-type, size) |
|
639 | 639 | |
|
640 | 640 | - `vfs-key`: is a key to the right vfs to write the file (see _makemap) |
|
641 | 641 | - `name`: file path of the file to copy (to be fed to the vfs) |
|
642 | 642 | - `file-type`: does this file need to be copied with the source lock? |
|
643 | 643 | - `size`: the size of the file (or None) |
|
644 | 644 | """ |
|
645 | 645 | assert repo._currentlock(repo._lockref) is not None |
|
646 | 646 | entries = [] |
|
647 | 647 | totalfilesize = 0 |
|
648 | 648 | |
|
649 | 649 | matcher = None |
|
650 | 650 | if includes or excludes: |
|
651 | 651 | matcher = narrowspec.match(repo.root, includes, excludes) |
|
652 | 652 | |
|
653 | 653 | for rl_type, name, ename, size in _walkstreamfiles(repo, matcher): |
|
654 | 654 | if size: |
|
655 | 655 | ft = _fileappend |
|
656 | 656 | if rl_type & store.FILEFLAGS_VOLATILE: |
|
657 | 657 | ft = _filefull |
|
658 | 658 | entries.append((_srcstore, name, ft, size)) |
|
659 | 659 | totalfilesize += size |
|
660 | 660 | for name in _walkstreamfullstorefiles(repo): |
|
661 | 661 | if repo.svfs.exists(name): |
|
662 | 662 | totalfilesize += repo.svfs.lstat(name).st_size |
|
663 | 663 | entries.append((_srcstore, name, _filefull, None)) |
|
664 | 664 | if includeobsmarkers and repo.svfs.exists(b'obsstore'): |
|
665 | 665 | totalfilesize += repo.svfs.lstat(b'obsstore').st_size |
|
666 | 666 | entries.append((_srcstore, b'obsstore', _filefull, None)) |
|
667 | 667 | for name in cacheutil.cachetocopy(repo): |
|
668 | 668 | if repo.cachevfs.exists(name): |
|
669 | 669 | totalfilesize += repo.cachevfs.lstat(name).st_size |
|
670 | 670 | entries.append((_srccache, name, _filefull, None)) |
|
671 | 671 | return entries, totalfilesize |
|
672 | 672 | |
|
673 | 673 | |
|
674 | 674 | def generatev2(repo, includes, excludes, includeobsmarkers): |
|
675 | 675 | """Emit content for version 2 of a streaming clone. |
|
676 | 676 | |
|
677 | 677 | the data stream consists of the following entries: |
|
678 | 678 | 1) A char representing the file destination (eg: store or cache) |
|
679 | 679 | 2) A varint containing the length of the filename |
|
680 | 680 | 3) A varint containing the length of file data |
|
681 | 681 | 4) N bytes containing the filename (the internal, store-agnostic form) |
|
682 | 682 | 5) N bytes containing the file data |
|
683 | 683 | |
|
684 | 684 | Returns a 3-tuple of (file count, file size, data iterator). |
|
685 | 685 | """ |
|
686 | 686 | |
|
687 | 687 | with repo.lock(): |
|
688 | 688 | |
|
689 | 689 | repo.ui.debug(b'scanning\n') |
|
690 | 690 | |
|
691 | 691 | entries, totalfilesize = _v2_walk( |
|
692 | 692 | repo, |
|
693 | 693 | includes=includes, |
|
694 | 694 | excludes=excludes, |
|
695 | 695 | includeobsmarkers=includeobsmarkers, |
|
696 | 696 | ) |
|
697 | 697 | |
|
698 | 698 | chunks = _emit2(repo, entries, totalfilesize) |
|
699 | 699 | first = next(chunks) |
|
700 | 700 | assert first is None |
|
701 | 701 | _test_sync_point_walk_1(repo) |
|
702 | 702 | _test_sync_point_walk_2(repo) |
|
703 | 703 | |
|
704 | 704 | return len(entries), totalfilesize, chunks |
|
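For reference, a single entry of the stream documented in ``generatev2()`` can be framed by hand. A standalone sketch, with a local base-128 varint encoder standing in for ``util.uvarintencode`` and a made-up payload:

    def uvarint(value):
        # unsigned LEB128-style varint: 7 bits per byte, high bit continues
        out = bytearray()
        while True:
            out.append((value & 0x7F) | (0x80 if value > 0x7F else 0))
            value >>= 7
            if not value:
                return bytes(out)

    name, data = b'data/foo.i', b'\x00' * 64
    entry = (b's'                  # 1) destination: the store vfs
             + uvarint(len(name))  # 2) filename length
             + uvarint(len(data))  # 3) data length
             + name + data)        # 4) filename, 5) raw file data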
705 | 705 | |
|
706 | 706 | |
|
707 | 707 | @contextlib.contextmanager |
|
708 | 708 | def nested(*ctxs): |
|
709 | 709 | this = ctxs[0] |
|
710 | 710 | rest = ctxs[1:] |
|
711 | 711 | with this: |
|
712 | 712 | if rest: |
|
713 | 713 | with nested(*rest): |
|
714 | 714 | yield |
|
715 | 715 | else: |
|
716 | 716 | yield |
|
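A quick illustration of ``nested()`` with ordinary context managers (hypothetical file names): the contexts are entered left to right and unwound in reverse order, like a variadic ``with`` statement:

    with nested(open('a.txt'), open('b.txt'), open('c.txt')):
        pass  # all three files are open here; all are closed again on exit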
717 | 717 | |
|
718 | 718 | |
|
719 | 719 | def consumev2(repo, fp, filecount, filesize): |
|
720 | 720 | """Apply the contents from a version 2 streaming clone. |
|
721 | 721 | |
|
722 | 722 | Data is read from an object that only needs to provide a ``read(size)`` |
|
723 | 723 | method. |
|
724 | 724 | """ |
|
725 | 725 | with repo.lock(): |
|
726 | 726 | repo.ui.status( |
|
727 | 727 | _(b'%d files to transfer, %s of data\n') |
|
728 | 728 | % (filecount, util.bytecount(filesize)) |
|
729 | 729 | ) |
|
730 | 730 | |
|
731 | 731 | start = util.timer() |
|
732 | 732 | progress = repo.ui.makeprogress( |
|
733 | 733 | _(b'clone'), total=filesize, unit=_(b'bytes') |
|
734 | 734 | ) |
|
735 | 735 | progress.update(0) |
|
736 | 736 | |
|
737 | 737 | vfsmap = _makemap(repo) |
|
738 | 738 | # we keep repo.vfs out of the map on purpose, there are too many dangers |
|
739 | 739 | # there (eg: .hg/hgrc), |
|
740 | 740 | # |
|
741 | 741 | # this assert is duplicated (from _makemap) as an author might think this |
|
742 | 742 | # is fine, while this is really not fine. |
|
743 | 743 | if repo.vfs in vfsmap.values(): |
|
744 | 744 | raise error.ProgrammingError( |
|
745 | 745 | b'repo.vfs must not be added to vfsmap for security reasons' |
|
746 | 746 | ) |
|
747 | 747 | |
|
748 | 748 | with repo.transaction(b'clone'): |
|
749 | 749 | ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values()) |
|
750 | 750 | with nested(*ctxs): |
|
751 | 751 | for i in range(filecount): |
|
752 | 752 | src = util.readexactly(fp, 1) |
|
753 | 753 | vfs = vfsmap[src] |
|
754 | 754 | namelen = util.uvarintdecodestream(fp) |
|
755 | 755 | datalen = util.uvarintdecodestream(fp) |
|
756 | 756 | |
|
757 | 757 | name = util.readexactly(fp, namelen) |
|
758 | 758 | |
|
759 | 759 | if repo.ui.debugflag: |
|
760 | 760 | repo.ui.debug( |
|
761 | 761 | b'adding [%s] %s (%s)\n' |
|
762 | 762 | % (src, name, util.bytecount(datalen)) |
|
763 | 763 | ) |
|
764 | 764 | |
|
765 | 765 | with vfs(name, b'w') as ofp: |
|
766 | 766 | for chunk in util.filechunkiter(fp, limit=datalen): |
|
767 | 767 | progress.increment(step=len(chunk)) |
|
768 | 768 | ofp.write(chunk) |
|
769 | 769 | |
|
770 | 770 | # force @filecache properties to be reloaded from |
|
771 | 771 | # streamclone-ed file at next access |
|
772 | 772 | repo.invalidate(clearfilecache=True) |
|
773 | 773 | |
|
774 | 774 | elapsed = util.timer() - start |
|
775 | 775 | if elapsed <= 0: |
|
776 | 776 | elapsed = 0.001 |
|
777 | 777 | repo.ui.status( |
|
778 | 778 | _(b'transferred %s in %.1f seconds (%s/sec)\n') |
|
779 | 779 | % ( |
|
780 | 780 | util.bytecount(progress.pos), |
|
781 | 781 | elapsed, |
|
782 | 782 | util.bytecount(progress.pos / elapsed), |
|
783 | 783 | ) |
|
784 | 784 | ) |
|
785 | 785 | progress.complete() |
|
786 | 786 | |
|
787 | 787 | |
|
788 | 788 | def applybundlev2(repo, fp, filecount, filesize, requirements): |
|
789 | 789 | from . import localrepo |
|
790 | 790 | |
|
791 | 791 | missingreqs = [r for r in requirements if r not in repo.supported] |
|
792 | 792 | if missingreqs: |
|
793 | 793 | raise error.Abort( |
|
794 | 794 | _(b'unable to apply stream clone: unsupported format: %s') |
|
795 | 795 | % b', '.join(sorted(missingreqs)) |
|
796 | 796 | ) |
|
797 | 797 | |
|
798 | 798 | consumev2(repo, fp, filecount, filesize) |
|
799 | 799 | |
|
800 | 800 | # new requirements = old non-format requirements + |
|
801 | 801 | # new format-related remote requirements |
|
802 | 802 | # requirements from the streamed-in repository |
|
803 | 803 | repo.requirements = set(requirements) | ( |
|
804 | 804 | repo.requirements - repo.supportedformats |
|
805 | 805 | ) |
|
806 | 806 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
807 | 807 | repo.ui, repo.requirements, repo.features |
|
808 | 808 | ) |
|
809 | 809 | scmutil.writereporequirements(repo) |
|
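The requirement merging above is plain set algebra; a toy example with simplified requirement names (not the real supported sets):

    supportedformats = {b'revlogv1', b'generaldelta'}  # format requirements
    old = {b'dotencode', b'revlogv1'}                  # local repository
    streamed = {b'generaldelta', b'revlogv1'}          # from the stream
    new = streamed | (old - supportedformats)
    # -> {b'dotencode', b'generaldelta', b'revlogv1'}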
810 | 810 | |
|
811 | 811 | |
|
812 | 812 | def _copy_files(src_vfs_map, dst_vfs_map, entries, progress): |
|
813 | 813 | hardlink = [True] |
|
814 | 814 | |
|
815 | 815 | def copy_used(): |
|
816 | 816 | hardlink[0] = False |
|
817 | 817 | progress.topic = _(b'copying') |
|
818 | 818 | |
|
819 | 819 | for k, path, size in entries: |
|
820 | 820 | src_vfs = src_vfs_map[k] |
|
821 | 821 | dst_vfs = dst_vfs_map[k] |
|
822 | 822 | src_path = src_vfs.join(path) |
|
823 | 823 | dst_path = dst_vfs.join(path) |
|
824 | dirname = dst_vfs.dirname(path) | |
|
825 | if not dst_vfs.exists(dirname): | |
|
826 | dst_vfs.makedirs(dirname) | |
|
824 | # We cannot use dirname and makedirs of dst_vfs here because the store | |
|
825 | # encoding confuses them. See issue 6581 for details. | |
|
826 | dirname = os.path.dirname(dst_path) | |
|
827 | if not os.path.exists(dirname): | |
|
828 | util.makedirs(dirname) | |
|
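The replacement works because ``dst_path`` has already been through the store encoding (``dst_vfs.join()`` returns the real on-disk location), while ``path`` is the logical name; deriving the parent directory from the logical name and letting the vfs create it can encode the path a second time. A schematic illustration with hypothetical values:

    path = b'data/container/some-very-long-name.patch.i'  # logical store name
    dst_path = dst_vfs.join(path)      # e.g. b'.../store/dh/abcd1234ef.i'
    os.path.dirname(dst_path)          # b'.../store/dh', the true parent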
827 | 829 | dst_vfs.register_file(path) |
|
828 | 830 | # XXX we could use the #nb_bytes argument. |
|
829 | 831 | util.copyfile( |
|
830 | 832 | src_path, |
|
831 | 833 | dst_path, |
|
832 | 834 | hardlink=hardlink[0], |
|
833 | 835 | no_hardlink_cb=copy_used, |
|
834 | 836 | check_fs_hardlink=False, |
|
835 | 837 | ) |
|
836 | 838 | progress.increment() |
|
837 | 839 | return hardlink[0] |
|
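The one-element list above is the usual trick for letting a callback flip a flag in the enclosing scope without ``nonlocal`` (this file still supported Python 2 at the time). The pattern in isolation:

    flag = [True]          # a one-element list acts as a mutable cell

    def downgrade():
        flag[0] = False    # visible to the enclosing scope

    downgrade()
    assert flag[0] is False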
838 | 840 | |
|
839 | 841 | |
|
840 | 842 | def local_copy(src_repo, dest_repo): |
|
841 | 843 | """copy all content from one local repository to another |
|
842 | 844 | |
|
843 | 845 | This is useful for local clones""" |
|
844 | 846 | src_store_requirements = { |
|
845 | 847 | r |
|
846 | 848 | for r in src_repo.requirements |
|
847 | 849 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS |
|
848 | 850 | } |
|
849 | 851 | dest_store_requirements = { |
|
850 | 852 | r |
|
851 | 853 | for r in dest_repo.requirements |
|
852 | 854 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS |
|
853 | 855 | } |
|
854 | 856 | assert src_store_requirements == dest_store_requirements |
|
855 | 857 | |
|
856 | 858 | with dest_repo.lock(): |
|
857 | 859 | with src_repo.lock(): |
|
858 | 860 | |
|
859 | 861 | # bookmarks are not integrated into the streaming as they might use the |
|
860 | 862 | # `repo.vfs` and there are too many sensitive data accessible |
|
861 | 863 | # through `repo.vfs` to expose them to streaming clone. |
|
862 | 864 | src_book_vfs = bookmarks.bookmarksvfs(src_repo) |
|
863 | 865 | srcbookmarks = src_book_vfs.join(b'bookmarks') |
|
864 | 866 | bm_count = 0 |
|
865 | 867 | if os.path.exists(srcbookmarks): |
|
866 | 868 | bm_count = 1 |
|
867 | 869 | |
|
868 | 870 | entries, totalfilesize = _v2_walk( |
|
869 | 871 | src_repo, |
|
870 | 872 | includes=None, |
|
871 | 873 | excludes=None, |
|
872 | 874 | includeobsmarkers=True, |
|
873 | 875 | ) |
|
874 | 876 | src_vfs_map = _makemap(src_repo) |
|
875 | 877 | dest_vfs_map = _makemap(dest_repo) |
|
876 | 878 | progress = src_repo.ui.makeprogress( |
|
877 | 879 | topic=_(b'linking'), |
|
878 | 880 | total=len(entries) + bm_count, |
|
879 | 881 | unit=_(b'files'), |
|
880 | 882 | ) |
|
881 | 883 | # copy files |
|
882 | 884 | # |
|
883 | 885 | # We could copy the full file while the source repository is locked |
|
884 | 886 | # and the other one without the lock. However, in the linking case, |
|
885 | 887 | # this would also require checks that nobody is appending any data |
|
886 | 888 | # to the files while we do the clone, so this is not done yet. We |
|
887 | 889 | # could do this blindly when copying files. |
|
888 | 890 | files = ((k, path, size) for k, path, ftype, size in entries) |
|
889 | 891 | hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress) |
|
890 | 892 | |
|
891 | 893 | # copy bookmarks over |
|
892 | 894 | if bm_count: |
|
893 | 895 | dst_book_vfs = bookmarks.bookmarksvfs(dest_repo) |
|
894 | 896 | dstbookmarks = dst_book_vfs.join(b'bookmarks') |
|
895 | 897 | util.copyfile(srcbookmarks, dstbookmarks) |
|
896 | 898 | progress.complete() |
|
897 | 899 | if hardlink: |
|
898 | 900 | msg = b'linked %d files\n' |
|
899 | 901 | else: |
|
900 | 902 | msg = b'copied %d files\n' |
|
901 | 903 | src_repo.ui.debug(msg % (len(entries) + bm_count)) |
|
902 | 904 | |
|
903 | 905 | with dest_repo.transaction(b"localclone") as tr: |
|
904 | 906 | dest_repo.store.write(tr) |
|
905 | 907 | |
|
906 | 908 | # clean up transaction files as they do not make sense |
|
907 | 909 | undo_files = [(dest_repo.svfs, b'undo.backupfiles')] |
|
908 | 910 | undo_files.extend(dest_repo.undofiles()) |
|
909 | 911 | for undovfs, undofile in undo_files: |
|
910 | 912 | try: |
|
911 | 913 | undovfs.unlink(undofile) |
|
912 | 914 | except OSError as e: |
|
913 | 915 | if e.errno != errno.ENOENT: |
|
914 | 916 | msg = _(b'error removing %s: %s\n') |
|
915 | 917 | path = undovfs.join(undofile) |
|
916 | 918 | e_msg = stringutil.forcebytestr(e) |
|
917 | 919 | msg %= (path, e_msg) |
|
918 | 920 | dest_repo.ui.warn(msg) |
@@ -1,908 +1,904 b''
|
1 | 1 | #require serve no-reposimplestore no-chg |
|
2 | 2 | |
|
3 | 3 | #testcases stream-legacy stream-bundle2 |
|
4 | 4 | |
|
5 | 5 | #if stream-legacy |
|
6 | 6 | $ cat << EOF >> $HGRCPATH |
|
7 | 7 | > [server] |
|
8 | 8 | > bundle2.stream = no |
|
9 | 9 | > EOF |
|
10 | 10 | #endif |
|
11 | 11 | |
|
12 | 12 | Initialize repository |
|
13 | 13 | the status call is to check for issue5130 |
|
14 | 14 | |
|
15 | 15 | $ hg init server |
|
16 | 16 | $ cd server |
|
17 | 17 | $ touch foo |
|
18 | 18 | $ hg -q commit -A -m initial |
|
19 | 19 | >>> for i in range(1024): |
|
20 | 20 | ... with open(str(i), 'wb') as fh: |
|
21 | 21 | ... fh.write(b"%d" % i) and None |
|
22 | 22 | $ hg -q commit -A -m 'add a lot of files' |
|
23 | 23 | $ hg st |
|
24 | 24 | |
|
25 | 25 | add files with "tricky" names: |
|
26 | 26 | |
|
27 | 27 | $ echo foo > 00changelog.i |
|
28 | 28 | $ echo foo > 00changelog.d |
|
29 | 29 | $ echo foo > 00changelog.n |
|
30 | 30 | $ echo foo > 00changelog-ab349180a0405010.nd |
|
31 | 31 | $ echo foo > 00manifest.i |
|
32 | 32 | $ echo foo > 00manifest.d |
|
33 | 33 | $ echo foo > foo.i |
|
34 | 34 | $ echo foo > foo.d |
|
35 | 35 | $ echo foo > foo.n |
|
36 | 36 | $ echo foo > undo.py |
|
37 | 37 | $ echo foo > undo.i |
|
38 | 38 | $ echo foo > undo.d |
|
39 | 39 | $ echo foo > undo.n |
|
40 | 40 | $ echo foo > undo.foo.i |
|
41 | 41 | $ echo foo > undo.foo.d |
|
42 | 42 | $ echo foo > undo.foo.n |
|
43 | 43 | $ echo foo > undo.babar |
|
44 | 44 | $ mkdir savanah |
|
45 | 45 | $ echo foo > savanah/foo.i |
|
46 | 46 | $ echo foo > savanah/foo.d |
|
47 | 47 | $ echo foo > savanah/foo.n |
|
48 | 48 | $ echo foo > savanah/undo.py |
|
49 | 49 | $ echo foo > savanah/undo.i |
|
50 | 50 | $ echo foo > savanah/undo.d |
|
51 | 51 | $ echo foo > savanah/undo.n |
|
52 | 52 | $ echo foo > savanah/undo.foo.i |
|
53 | 53 | $ echo foo > savanah/undo.foo.d |
|
54 | 54 | $ echo foo > savanah/undo.foo.n |
|
55 | 55 | $ echo foo > savanah/undo.babar |
|
56 | 56 | $ mkdir data |
|
57 | 57 | $ echo foo > data/foo.i |
|
58 | 58 | $ echo foo > data/foo.d |
|
59 | 59 | $ echo foo > data/foo.n |
|
60 | 60 | $ echo foo > data/undo.py |
|
61 | 61 | $ echo foo > data/undo.i |
|
62 | 62 | $ echo foo > data/undo.d |
|
63 | 63 | $ echo foo > data/undo.n |
|
64 | 64 | $ echo foo > data/undo.foo.i |
|
65 | 65 | $ echo foo > data/undo.foo.d |
|
66 | 66 | $ echo foo > data/undo.foo.n |
|
67 | 67 | $ echo foo > data/undo.babar |
|
68 | 68 | $ mkdir meta |
|
69 | 69 | $ echo foo > meta/foo.i |
|
70 | 70 | $ echo foo > meta/foo.d |
|
71 | 71 | $ echo foo > meta/foo.n |
|
72 | 72 | $ echo foo > meta/undo.py |
|
73 | 73 | $ echo foo > meta/undo.i |
|
74 | 74 | $ echo foo > meta/undo.d |
|
75 | 75 | $ echo foo > meta/undo.n |
|
76 | 76 | $ echo foo > meta/undo.foo.i |
|
77 | 77 | $ echo foo > meta/undo.foo.d |
|
78 | 78 | $ echo foo > meta/undo.foo.n |
|
79 | 79 | $ echo foo > meta/undo.babar |
|
80 | 80 | $ mkdir store |
|
81 | 81 | $ echo foo > store/foo.i |
|
82 | 82 | $ echo foo > store/foo.d |
|
83 | 83 | $ echo foo > store/foo.n |
|
84 | 84 | $ echo foo > store/undo.py |
|
85 | 85 | $ echo foo > store/undo.i |
|
86 | 86 | $ echo foo > store/undo.d |
|
87 | 87 | $ echo foo > store/undo.n |
|
88 | 88 | $ echo foo > store/undo.foo.i |
|
89 | 89 | $ echo foo > store/undo.foo.d |
|
90 | 90 | $ echo foo > store/undo.foo.n |
|
91 | 91 | $ echo foo > store/undo.babar |
|
92 | 92 | |
|
93 | 93 | Name with special characters |
|
94 | 94 | |
|
95 | 95 | $ echo foo > store/CélesteVille_is_a_Capital_City |
|
96 | 96 | |
|
97 | 97 | name causing issue6581 |
|
98 | 98 | |
|
99 | 99 | $ mkdir --parents container/isam-build-centos7/ |
|
100 | 100 | $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch |
|
101 | 101 | |
|
102 | 102 | Add all that |
|
103 | 103 | |
|
104 | 104 | $ hg add . |
|
105 | 105 | adding 00changelog-ab349180a0405010.nd |
|
106 | 106 | adding 00changelog.d |
|
107 | 107 | adding 00changelog.i |
|
108 | 108 | adding 00changelog.n |
|
109 | 109 | adding 00manifest.d |
|
110 | 110 | adding 00manifest.i |
|
111 | 111 | adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch |
|
112 | 112 | adding data/foo.d |
|
113 | 113 | adding data/foo.i |
|
114 | 114 | adding data/foo.n |
|
115 | 115 | adding data/undo.babar |
|
116 | 116 | adding data/undo.d |
|
117 | 117 | adding data/undo.foo.d |
|
118 | 118 | adding data/undo.foo.i |
|
119 | 119 | adding data/undo.foo.n |
|
120 | 120 | adding data/undo.i |
|
121 | 121 | adding data/undo.n |
|
122 | 122 | adding data/undo.py |
|
123 | 123 | adding foo.d |
|
124 | 124 | adding foo.i |
|
125 | 125 | adding foo.n |
|
126 | 126 | adding meta/foo.d |
|
127 | 127 | adding meta/foo.i |
|
128 | 128 | adding meta/foo.n |
|
129 | 129 | adding meta/undo.babar |
|
130 | 130 | adding meta/undo.d |
|
131 | 131 | adding meta/undo.foo.d |
|
132 | 132 | adding meta/undo.foo.i |
|
133 | 133 | adding meta/undo.foo.n |
|
134 | 134 | adding meta/undo.i |
|
135 | 135 | adding meta/undo.n |
|
136 | 136 | adding meta/undo.py |
|
137 | 137 | adding savanah/foo.d |
|
138 | 138 | adding savanah/foo.i |
|
139 | 139 | adding savanah/foo.n |
|
140 | 140 | adding savanah/undo.babar |
|
141 | 141 | adding savanah/undo.d |
|
142 | 142 | adding savanah/undo.foo.d |
|
143 | 143 | adding savanah/undo.foo.i |
|
144 | 144 | adding savanah/undo.foo.n |
|
145 | 145 | adding savanah/undo.i |
|
146 | 146 | adding savanah/undo.n |
|
147 | 147 | adding savanah/undo.py |
|
148 | 148 | adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc) |
|
149 | 149 | adding store/foo.d |
|
150 | 150 | adding store/foo.i |
|
151 | 151 | adding store/foo.n |
|
152 | 152 | adding store/undo.babar |
|
153 | 153 | adding store/undo.d |
|
154 | 154 | adding store/undo.foo.d |
|
155 | 155 | adding store/undo.foo.i |
|
156 | 156 | adding store/undo.foo.n |
|
157 | 157 | adding store/undo.i |
|
158 | 158 | adding store/undo.n |
|
159 | 159 | adding store/undo.py |
|
160 | 160 | adding undo.babar |
|
161 | 161 | adding undo.d |
|
162 | 162 | adding undo.foo.d |
|
163 | 163 | adding undo.foo.i |
|
164 | 164 | adding undo.foo.n |
|
165 | 165 | adding undo.i |
|
166 | 166 | adding undo.n |
|
167 | 167 | adding undo.py |
|
168 | 168 | $ hg ci -m 'add files with "tricky" name' |
|
169 | 169 | $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid |
|
170 | 170 | $ cat hg.pid > $DAEMON_PIDS |
|
171 | 171 | $ cd .. |
|
172 | 172 | |
|
173 | 173 | Check local clone |
|
174 | 174 | ================== |
|
175 | 175 | |
|
176 | 176 | The logic is close enough to the uncompressed case. |
|
177 | 177 | This is present here to reuse the testing around files with "special" names. |
|
178 | 178 | |
|
179 | 179 | $ hg clone server local-clone |
|
180 | updating to branch default (missing-correct-output !) | |
|
181 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved (missing-correct-output !) | |
|
182 | abort: $ENOENT$: '$TESTTMP/local-clone/.hg/store/dh/containe/isam-bui/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4d94041277bcd011e1d54c247523c124b4a325686.i' (known-bad-output !) | |
|
183 | [255] | |
|
180 | updating to branch default | |
|
181 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
|
184 | 182 | |
|
185 | 183 | Check that the clone went well |
|
186 | 184 | |
|
187 | 185 | $ hg verify -R local-clone |
|
188 | checking changesets (missing-correct-output !) | |
|
189 | checking manifests (missing-correct-output !) | |
|
190 | crosschecking files in changesets and manifests (missing-correct-output !) | |
|
191 | checking files (missing-correct-output !) | |
|
192 | checked 3 changesets with 1088 changes to 1088 files (missing-correct-output !) | |
|
193 | abort: repository local-clone not found (known-bad-output !) | |
|
194 | [255] | |
|
186 | checking changesets | |
|
187 | checking manifests | |
|
188 | crosschecking files in changesets and manifests | |
|
189 | checking files | |
|
190 | checked 3 changesets with 1088 changes to 1088 files | |
|
195 | 191 | |
|
196 | 192 | Check uncompressed |
|
197 | 193 | ================= |
|
198 | 194 | |
|
199 | 195 | Cannot stream clone when server.uncompressed is set |
|
200 | 196 | |
|
201 | 197 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out' |
|
202 | 198 | 200 Script output follows |
|
203 | 199 | |
|
204 | 200 | 1 |
|
205 | 201 | |
|
206 | 202 | #if stream-legacy |
|
207 | 203 | $ hg debugcapabilities http://localhost:$HGPORT |
|
208 | 204 | Main capabilities: |
|
209 | 205 | batch |
|
210 | 206 | branchmap |
|
211 | 207 | $USUAL_BUNDLE2_CAPS_SERVER$ |
|
212 | 208 | changegroupsubset |
|
213 | 209 | compression=$BUNDLE2_COMPRESSIONS$ |
|
214 | 210 | getbundle |
|
215 | 211 | httpheader=1024 |
|
216 | 212 | httpmediatype=0.1rx,0.1tx,0.2tx |
|
217 | 213 | known |
|
218 | 214 | lookup |
|
219 | 215 | pushkey |
|
220 | 216 | unbundle=HG10GZ,HG10BZ,HG10UN |
|
221 | 217 | unbundlehash |
|
222 | 218 | Bundle2 capabilities: |
|
223 | 219 | HG20 |
|
224 | 220 | bookmarks |
|
225 | 221 | changegroup |
|
226 | 222 | 01 |
|
227 | 223 | 02 |
|
228 | 224 | checkheads |
|
229 | 225 | related |
|
230 | 226 | digests |
|
231 | 227 | md5 |
|
232 | 228 | sha1 |
|
233 | 229 | sha512 |
|
234 | 230 | error |
|
235 | 231 | abort |
|
236 | 232 | unsupportedcontent |
|
237 | 233 | pushraced |
|
238 | 234 | pushkey |
|
239 | 235 | hgtagsfnodes |
|
240 | 236 | listkeys |
|
241 | 237 | phases |
|
242 | 238 | heads |
|
243 | 239 | pushkey |
|
244 | 240 | remote-changegroup |
|
245 | 241 | http |
|
246 | 242 | https |
|
247 | 243 | |
|
248 | 244 | $ hg clone --stream -U http://localhost:$HGPORT server-disabled |
|
249 | 245 | warning: stream clone requested but server has them disabled |
|
250 | 246 | requesting all changes |
|
251 | 247 | adding changesets |
|
252 | 248 | adding manifests |
|
253 | 249 | adding file changes |
|
254 | 250 | added 3 changesets with 1088 changes to 1088 files |
|
255 | 251 | new changesets 96ee1d7354c4:5223b5e3265f |
|
256 | 252 | |
|
257 | 253 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" |
|
258 | 254 | 200 Script output follows |
|
259 | 255 | content-type: application/mercurial-0.2 |
|
260 | 256 | |
|
261 | 257 | |
|
262 | 258 | $ f --size body --hexdump --bytes 100 |
|
263 | 259 | body: size=232 |
|
264 | 260 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
265 | 261 | 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| |
|
266 | 262 | 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| |
|
267 | 263 | 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| |
|
268 | 264 | 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| |
|
269 | 265 | 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| |
|
270 | 266 | 0060: 69 73 20 66 |is f| |
|
271 | 267 | |
|
272 | 268 | #endif |
|
273 | 269 | #if stream-bundle2 |
|
274 | 270 | $ hg debugcapabilities http://localhost:$HGPORT |
|
275 | 271 | Main capabilities: |
|
276 | 272 | batch |
|
277 | 273 | branchmap |
|
278 | 274 | $USUAL_BUNDLE2_CAPS_SERVER$ |
|
279 | 275 | changegroupsubset |
|
280 | 276 | compression=$BUNDLE2_COMPRESSIONS$ |
|
281 | 277 | getbundle |
|
282 | 278 | httpheader=1024 |
|
283 | 279 | httpmediatype=0.1rx,0.1tx,0.2tx |
|
284 | 280 | known |
|
285 | 281 | lookup |
|
286 | 282 | pushkey |
|
287 | 283 | unbundle=HG10GZ,HG10BZ,HG10UN |
|
288 | 284 | unbundlehash |
|
289 | 285 | Bundle2 capabilities: |
|
290 | 286 | HG20 |
|
291 | 287 | bookmarks |
|
292 | 288 | changegroup |
|
293 | 289 | 01 |
|
294 | 290 | 02 |
|
295 | 291 | checkheads |
|
296 | 292 | related |
|
297 | 293 | digests |
|
298 | 294 | md5 |
|
299 | 295 | sha1 |
|
300 | 296 | sha512 |
|
301 | 297 | error |
|
302 | 298 | abort |
|
303 | 299 | unsupportedcontent |
|
304 | 300 | pushraced |
|
305 | 301 | pushkey |
|
306 | 302 | hgtagsfnodes |
|
307 | 303 | listkeys |
|
308 | 304 | phases |
|
309 | 305 | heads |
|
310 | 306 | pushkey |
|
311 | 307 | remote-changegroup |
|
312 | 308 | http |
|
313 | 309 | https |
|
314 | 310 | |
|
315 | 311 | $ hg clone --stream -U http://localhost:$HGPORT server-disabled |
|
316 | 312 | warning: stream clone requested but server has them disabled |
|
317 | 313 | requesting all changes |
|
318 | 314 | adding changesets |
|
319 | 315 | adding manifests |
|
320 | 316 | adding file changes |
|
321 | 317 | added 3 changesets with 1088 changes to 1088 files |
|
322 | 318 | new changesets 96ee1d7354c4:5223b5e3265f |
|
323 | 319 | |
|
324 | 320 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" |
|
325 | 321 | 200 Script output follows |
|
326 | 322 | content-type: application/mercurial-0.2 |
|
327 | 323 | |
|
328 | 324 | |
|
329 | 325 | $ f --size body --hexdump --bytes 100 |
|
330 | 326 | body: size=232 |
|
331 | 327 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
332 | 328 | 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| |
|
333 | 329 | 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| |
|
334 | 330 | 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| |
|
335 | 331 | 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| |
|
336 | 332 | 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| |
|
337 | 333 | 0060: 69 73 20 66 |is f| |
|
338 | 334 | |
|
339 | 335 | #endif |
|
340 | 336 | |
|
341 | 337 | $ killdaemons.py |
|
342 | 338 | $ cd server |
|
343 | 339 | $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt |
|
344 | 340 | $ cat hg.pid > $DAEMON_PIDS |
|
345 | 341 | $ cd .. |
|
346 | 342 | |
|
347 | 343 | Basic clone |
|
348 | 344 | |
|
349 | 345 | #if stream-legacy |
|
350 | 346 | $ hg clone --stream -U http://localhost:$HGPORT clone1 |
|
351 | 347 | streaming all changes |
|
352 | 348 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
353 | 349 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
354 | 350 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
355 | 351 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
356 | 352 | searching for changes |
|
357 | 353 | no changes found |
|
358 | 354 | $ cat server/errors.txt |
|
359 | 355 | #endif |
|
360 | 356 | #if stream-bundle2 |
|
361 | 357 | $ hg clone --stream -U http://localhost:$HGPORT clone1 |
|
362 | 358 | streaming all changes |
|
363 | 359 | 1093 files to transfer, 102 KB of data (no-zstd !) |
|
364 | 360 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
365 | 361 | 1093 files to transfer, 98.9 KB of data (zstd !) |
|
366 | 362 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) |
|
367 | 363 | |
|
368 | 364 | $ ls -1 clone1/.hg/cache |
|
369 | 365 | branch2-base |
|
370 | 366 | branch2-immutable |
|
371 | 367 | branch2-served |
|
372 | 368 | branch2-served.hidden |
|
373 | 369 | branch2-visible |
|
374 | 370 | branch2-visible-hidden |
|
375 | 371 | rbc-names-v1 |
|
376 | 372 | rbc-revs-v1 |
|
377 | 373 | tags2 |
|
378 | 374 | tags2-served |
|
379 | 375 | $ cat server/errors.txt |
|
380 | 376 | #endif |
|
381 | 377 | |
|
382 | 378 | getbundle requests with stream=1 are uncompressed |
|
383 | 379 | |
|
384 | 380 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" |
|
385 | 381 | 200 Script output follows |
|
386 | 382 | content-type: application/mercurial-0.2 |
|
387 | 383 | |
|
388 | 384 | |
|
389 | 385 | #if no-zstd no-rust |
|
390 | 386 | $ f --size --hex --bytes 256 body |
|
391 | 387 | body: size=119153 |
|
392 | 388 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
393 | 389 | 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
394 | 390 | 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10| |
|
395 | 391 | 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109| |
|
396 | 392 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| |
|
397 | 393 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| |
|
398 | 394 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| |
|
399 | 395 | 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| |
|
400 | 396 | 0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| |
|
401 | 397 | 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| |
|
402 | 398 | 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| |
|
403 | 399 | 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| |
|
404 | 400 | 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,| |
|
405 | 401 | 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............| |
|
406 | 402 | 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan| |
|
407 | 403 | 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0| |
|
408 | 404 | #endif |
|
409 | 405 | #if zstd no-rust |
|
410 | 406 | $ f --size --hex --bytes 256 body |
|
411 | 407 | body: size=116340 |
|
412 | 408 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
413 | 409 | 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
414 | 410 | 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10| |
|
415 | 411 | 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| |
|
416 | 412 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| |
|
417 | 413 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| |
|
418 | 414 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| |
|
419 | 415 | 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres| |
|
420 | 416 | 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl| |
|
421 | 417 | 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev| |
|
422 | 418 | 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s| |
|
423 | 419 | 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......| |
|
424 | 420 | 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................| |
|
425 | 421 | 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.| |
|
426 | 422 | 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..| |
|
427 | 423 | 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed| |
|
428 | 424 | #endif |
|
429 | 425 | #if zstd rust no-dirstate-v2 |
|
430 | 426 | $ f --size --hex --bytes 256 body |
|
431 | 427 | body: size=116361 |
|
432 | 428 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
433 | 429 | 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
434 | 430 | 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10| |
|
435 | 431 | 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| |
|
436 | 432 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| |
|
437 | 433 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| |
|
438 | 434 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| |
|
439 | 435 | 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod| |
|
440 | 436 | 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co| |
|
441 | 437 | 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2| |
|
442 | 438 | 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| |
|
443 | 439 | 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| |
|
444 | 440 | 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| |
|
445 | 441 | 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| |
|
446 | 442 | 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| |
|
447 | 443 | 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| |
|
448 | 444 | #endif |
|
449 | 445 | #if zstd dirstate-v2 |
|
450 | 446 | $ f --size --hex --bytes 256 body |
|
451 | 447 | body: size=109549 |
|
452 | 448 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
453 | 449 | 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
454 | 450 | 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95| |
|
455 | 451 | 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| |
|
456 | 452 | 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote| |
|
457 | 453 | 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs| |
|
458 | 454 | 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach| |
|
459 | 455 | 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta| |
|
460 | 456 | 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no| |
|
461 | 457 | 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c| |
|
462 | 458 | 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%| |
|
463 | 459 | 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| |
|
464 | 460 | 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| |
|
465 | 461 | 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| |
|
466 | 462 | 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| |
|
467 | 463 | 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| |
|
468 | 464 | #endif |
|
469 | 465 | |
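The hexdumps above can be decoded by hand: the body opens with a length-prefixed compression name (0x04 + "none", confirming the stream really is uncompressed), the HG20 bundle2 magic, a zero-length stream-parameter block, and then the STREAM2 part header carrying the bytecount, filecount, and requirements parameters. (The x-hgarg-1 request header is URL-encoded twice over: one unquote pass recovers the bundlecaps list, a second recovers the newline-separated bundle2 capabilities.) The sketch below walks that framing for the no-zstd/no-rust dump; the field layout is inferred from the dump itself, so treat it as illustrative rather than a reusable bundle2 parser.

    import struct

    with open('body', 'rb') as fh:
        data = fh.read()

    pos = 0
    comp_len = data[pos]                       # 0x04
    pos += 1
    compression = data[pos:pos + comp_len]     # b'none' -> uncompressed
    pos += comp_len
    magic = data[pos:pos + 4]                  # b'HG20' bundle2 magic
    pos += 4
    (params_len,) = struct.unpack('>I', data[pos:pos + 4])
    pos += 4 + params_len                      # stream-level params (empty here)
    (header_len,) = struct.unpack('>I', data[pos:pos + 4])
    pos += 4                                   # 0x80 == 128-byte part header
    type_len = data[pos]
    pos += 1
    part_type = data[pos:pos + type_len]       # b'STREAM2'
    pos += type_len + 4                        # skip the 4-byte part id
    mandatory, advisory = data[pos], data[pos + 1]
    pos += 2                                   # 3 mandatory params, 0 advisory
    sizes = [(data[pos + 2 * i], data[pos + 2 * i + 1])
             for i in range(mandatory + advisory)]
    pos += 2 * (mandatory + advisory)
    for key_len, val_len in sizes:
        key, pos = data[pos:pos + key_len], pos + key_len
        value, pos = data[pos:pos + val_len], pos + val_len
        print(key.decode(), '=', value.decode())
    # prints (no-zstd no-rust): bytecount = 104115, filecount = 1093,
    # requirements = dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2C...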
|
470 | 466 | --uncompressed is an alias for --stream
|
471 | 467 | |
|
472 | 468 | #if stream-legacy |
|
473 | 469 | $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed |
|
474 | 470 | streaming all changes |
|
475 | 471 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
476 | 472 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
477 | 473 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
478 | 474 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
479 | 475 | searching for changes |
|
480 | 476 | no changes found |
|
481 | 477 | #endif |
|
482 | 478 | #if stream-bundle2 |
|
483 | 479 | $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed |
|
484 | 480 | streaming all changes |
|
485 | 481 | 1093 files to transfer, 102 KB of data (no-zstd !) |
|
486 | 482 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
487 | 483 | 1093 files to transfer, 98.9 KB of data (zstd !) |
|
488 | 484 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) |
|
489 | 485 | #endif |
|
490 | 486 | |
|
491 | 487 | Clone with background file closing enabled |
|
492 | 488 | |
|
493 | 489 | #if stream-legacy |
|
494 | 490 | $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding |
|
495 | 491 | using http://localhost:$HGPORT/ |
|
496 | 492 | sending capabilities command |
|
497 | 493 | sending branchmap command |
|
498 | 494 | streaming all changes |
|
499 | 495 | sending stream_out command |
|
500 | 496 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
501 | 497 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
502 | 498 | starting 4 threads for background file closing |
|
503 | 499 | updating the branch cache |
|
504 | 500 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
505 | 501 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
506 | 502 | query 1; heads |
|
507 | 503 | sending batch command |
|
508 | 504 | searching for changes |
|
509 | 505 | all remote heads known locally |
|
510 | 506 | no changes found |
|
511 | 507 | sending getbundle command |
|
512 | 508 | bundle2-input-bundle: with-transaction |
|
513 | 509 | bundle2-input-part: "listkeys" (params: 1 mandatory) supported |
|
514 | 510 | bundle2-input-part: "phase-heads" supported |
|
515 | 511 | bundle2-input-part: total payload size 24 |
|
516 | 512 | bundle2-input-bundle: 2 parts total |
|
517 | 513 | checking for updated bookmarks |
|
518 | 514 | updating the branch cache |
|
519 | 515 | (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) |
|
520 | 516 | #endif |
|
521 | 517 | #if stream-bundle2 |
|
522 | 518 | $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding |
|
523 | 519 | using http://localhost:$HGPORT/ |
|
524 | 520 | sending capabilities command |
|
525 | 521 | query 1; heads |
|
526 | 522 | sending batch command |
|
527 | 523 | streaming all changes |
|
528 | 524 | sending getbundle command |
|
529 | 525 | bundle2-input-bundle: with-transaction |
|
530 | 526 | bundle2-input-part: "stream2" (params: 3 mandatory) supported |
|
531 | 527 | applying stream bundle |
|
532 | 528 | 1093 files to transfer, 102 KB of data (no-zstd !) |
|
533 | 529 | 1093 files to transfer, 98.9 KB of data (zstd !) |
|
534 | 530 | starting 4 threads for background file closing |
|
535 | 531 | starting 4 threads for background file closing |
|
536 | 532 | updating the branch cache |
|
537 | 533 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
538 | 534 | bundle2-input-part: total payload size 118984 (no-zstd !) |
|
539 | 535 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) |
|
540 | 536 | bundle2-input-part: total payload size 116145 (zstd !) |
|
541 | 537 | bundle2-input-part: "listkeys" (params: 1 mandatory) supported |
|
542 | 538 | bundle2-input-bundle: 2 parts total |
|
543 | 539 | checking for updated bookmarks |
|
544 | 540 | updating the branch cache |
|
545 | 541 | (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob) |
|
546 | 542 | #endif |
|
547 | 543 | |
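For context on what these runs exercise: when worker.backgroundclose is set and at least worker.backgroundcloseminfilecount files are expected, the stream consumer hands close() calls off to a pool of worker threads instead of closing each store file inline, which is where the "starting 4 threads for background file closing" lines come from. A minimal sketch of the vfs API involved follows; the repository path and file names are illustrative only.

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    ui.setconfig(b'worker', b'backgroundclose', b'true')
    ui.setconfig(b'worker', b'backgroundcloseminfilecount', b'1')
    repo = hg.repository(ui, b'.')  # any local repository

    # backgroundclosing() starts the closer threads once the expected file
    # count crosses the configured threshold; close() on handles opened
    # with backgroundclose=True is then queued to those threads.
    with repo.svfs.backgroundclosing(repo.ui, expectedcount=2):
        for name in (b'example-a', b'example-b'):  # illustrative paths
            with repo.svfs(name, b'wb', backgroundclose=True) as fp:
                fp.write(b'...')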
|
548 | 544 | Cannot stream clone when there are secret changesets |
|
549 | 545 | |
|
550 | 546 | $ hg -R server phase --force --secret -r tip |
|
551 | 547 | $ hg clone --stream -U http://localhost:$HGPORT secret-denied |
|
552 | 548 | warning: stream clone requested but server has them disabled |
|
553 | 549 | requesting all changes |
|
554 | 550 | adding changesets |
|
555 | 551 | adding manifests |
|
556 | 552 | adding file changes |
|
557 | 553 | added 2 changesets with 1025 changes to 1025 files |
|
558 | 554 | new changesets 96ee1d7354c4:c17445101a72 |
|
559 | 555 | |
|
560 | 556 | $ killdaemons.py |
|
561 | 557 | |
|
562 | 558 | Streaming of secrets can be overridden by server config |
|
563 | 559 | |
|
564 | 560 | $ cd server |
|
565 | 561 | $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid |
|
566 | 562 | $ cat hg.pid > $DAEMON_PIDS |
|
567 | 563 | $ cd .. |
|
568 | 564 | |
|
569 | 565 | #if stream-legacy |
|
570 | 566 | $ hg clone --stream -U http://localhost:$HGPORT secret-allowed |
|
571 | 567 | streaming all changes |
|
572 | 568 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
573 | 569 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
574 | 570 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
575 | 571 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
576 | 572 | searching for changes |
|
577 | 573 | no changes found |
|
578 | 574 | #endif |
|
579 | 575 | #if stream-bundle2 |
|
580 | 576 | $ hg clone --stream -U http://localhost:$HGPORT secret-allowed |
|
581 | 577 | streaming all changes |
|
582 | 578 | 1093 files to transfer, 102 KB of data (no-zstd !) |
|
583 | 579 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
584 | 580 | 1093 files to transfer, 98.9 KB of data (zstd !) |
|
585 | 581 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) |
|
586 | 582 | #endif |
|
587 | 583 | |
|
588 | 584 | $ killdaemons.py |
|
589 | 585 | |
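Both runs above hinge on a single server-side decision: stream generation is refused when the repository contains secret changesets, unless the operator opts in with server.uncompressedallowsecret. A simplified sketch of that gate follows (the exact structure is assumed; the real check lives in streamclone's allowservergeneration):

    from mercurial import phases

    def allow_stream_generation(repo):
        # server.uncompressed=false disables stream clones entirely
        if not repo.ui.configbool(b'server', b'uncompressed', True,
                                  untrusted=True):
            return False
        # a raw file copy would leak secret changesets, so any secret
        # revision disables streaming unless explicitly allowed
        if phases.hassecret(repo):
            return repo.ui.configbool(b'server',
                                      b'uncompressedallowsecret', False)
        return True

When the gate returns False, the client sees the "stream clone requested but server has them disabled" warning and falls back to a regular pull.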
|
590 | 586 | Verify interaction between preferuncompressed and secret presence |
|
591 | 587 | |
|
592 | 588 | $ cd server |
|
593 | 589 | $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid |
|
594 | 590 | $ cat hg.pid > $DAEMON_PIDS |
|
595 | 591 | $ cd .. |
|
596 | 592 | |
|
597 | 593 | $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret |
|
598 | 594 | requesting all changes |
|
599 | 595 | adding changesets |
|
600 | 596 | adding manifests |
|
601 | 597 | adding file changes |
|
602 | 598 | added 2 changesets with 1025 changes to 1025 files |
|
603 | 599 | new changesets 96ee1d7354c4:c17445101a72 |
|
604 | 600 | |
|
605 | 601 | $ killdaemons.py |
|
606 | 602 | |
|
607 | 603 | Clone not allowed when full bundles are disabled and secrets can't be streamed
|
608 | 604 | |
|
609 | 605 | $ cd server |
|
610 | 606 | $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid |
|
611 | 607 | $ cat hg.pid > $DAEMON_PIDS |
|
612 | 608 | $ cd .. |
|
613 | 609 | |
|
614 | 610 | $ hg clone --stream http://localhost:$HGPORT secret-full-disabled |
|
615 | 611 | warning: stream clone requested but server has them disabled |
|
616 | 612 | requesting all changes |
|
617 | 613 | remote: abort: server has pull-based clones disabled |
|
618 | 614 | abort: pull failed on remote |
|
619 | 615 | (remove --pull if specified or upgrade Mercurial) |
|
620 | 616 | [100] |
|
621 | 617 | |
|
622 | 618 | Local stream clone with secrets involved |
|
623 | 619 | (This only tests behavior: if you have access to the repo's files,

624 | 620 | there is no security boundary anyway, so preventing a clone here isn't important.)
|
625 | 621 | |
|
626 | 622 | $ hg clone -U --stream server local-secret |
|
627 | 623 | warning: stream clone requested but server has them disabled |
|
628 | 624 | requesting all changes |
|
629 | 625 | adding changesets |
|
630 | 626 | adding manifests |
|
631 | 627 | adding file changes |
|
632 | 628 | added 2 changesets with 1025 changes to 1025 files |
|
633 | 629 | new changesets 96ee1d7354c4:c17445101a72 |
|
634 | 630 | |
|
635 | 631 | Stream clone while repo is changing: |
|
636 | 632 | |
|
637 | 633 | $ mkdir changing |
|
638 | 634 | $ cd changing |
|
639 | 635 | |
|
640 | 636 | extension for delaying the server process so we can reliably modify the repo
|
641 | 637 | while cloning |
|
642 | 638 | |
|
643 | 639 | $ cat > stream_steps.py <<EOF |
|
644 | 640 | > import os |
|
645 | 641 | > import sys |
|
646 | 642 | > from mercurial import ( |
|
647 | 643 | > encoding, |
|
648 | 644 | > extensions, |
|
649 | 645 | > streamclone, |
|
650 | 646 | > testing, |
|
651 | 647 | > ) |
|
652 | 648 | > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1'] |
|
653 | 649 | > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2'] |
|
654 | 650 | > |
|
655 | 651 | > def _test_sync_point_walk_1(orig, repo): |
|
656 | 652 | > testing.write_file(WALKED_FILE_1) |
|
657 | 653 | > |
|
658 | 654 | > def _test_sync_point_walk_2(orig, repo): |
|
659 | 655 | > assert repo._currentlock(repo._lockref) is None |
|
660 | 656 | > testing.wait_file(WALKED_FILE_2) |
|
661 | 657 | > |
|
662 | 658 | > extensions.wrapfunction( |
|
663 | 659 | > streamclone, |
|
664 | 660 | > '_test_sync_point_walk_1', |
|
665 | 661 | > _test_sync_point_walk_1 |
|
666 | 662 | > ) |
|
667 | 663 | > extensions.wrapfunction( |
|
668 | 664 | > streamclone, |
|
669 | 665 | > '_test_sync_point_walk_2', |
|
670 | 666 | > _test_sync_point_walk_2 |
|
671 | 667 | > ) |
|
672 | 668 | > EOF |
|
673 | 669 | |
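The write_file/wait_file calls are the whole synchronisation protocol: the server signals that it has finished walking (and size-stamping) the store, the test then mutates the repository, and finally releases the server to stream the actual file contents. The real helpers live in mercurial.testing; the stand-ins below are simplified equivalents, shown only to make the handshake explicit.

    import os
    import time

    def write_file(path, content=b''):
        # signal: create the marker file the other process waits on
        with open(path, 'wb') as fh:
            fh.write(content)

    def wait_file(path, timeout=10):
        # block until the marker file appears, polling cheaply
        deadline = time.monotonic() + timeout
        while not os.path.exists(path):
            if time.monotonic() > deadline:
                raise RuntimeError('timed out waiting for %r' % path)
            time.sleep(0.01)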
|
674 | 670 | prepare a repo with a small and a big file to cover both code paths in emitrevlogdata
|
675 | 671 | |
|
676 | 672 | $ hg init repo |
|
677 | 673 | $ touch repo/f1 |
|
678 | 674 | $ $TESTDIR/seq.py 50000 > repo/f2 |
|
679 | 675 | $ hg -R repo ci -Aqm "0" |
|
680 | 676 | $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1" |
|
681 | 677 | $ export HG_TEST_STREAM_WALKED_FILE_1 |
|
682 | 678 | $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2" |
|
683 | 679 | $ export HG_TEST_STREAM_WALKED_FILE_2 |
|
684 | 680 | $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3" |
|
685 | 681 | $ export HG_TEST_STREAM_WALKED_FILE_3 |
|
686 | 682 | # $ cat << EOF >> $HGRCPATH |
|
687 | 683 | # > [hooks] |
|
688 | 684 | # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*" |
|
689 | 685 | # > EOF |
|
690 | 686 | $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py" |
|
691 | 687 | $ cat hg.pid >> $DAEMON_PIDS |
|
692 | 688 | |
|
693 | 689 | clone while modifying the repo between stat'ing files with the write lock and

694 | 690 | actually serving file content
|
695 | 691 | |
|
696 | 692 | $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") & |
|
697 | 693 | $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1 |
|
698 | 694 | $ echo >> repo/f1 |
|
699 | 695 | $ echo >> repo/f2 |
|
700 | 696 | $ hg -R repo ci -m "1" --config ui.timeout.warn=-1 |
|
701 | 697 | $ touch $HG_TEST_STREAM_WALKED_FILE_2 |
|
702 | 698 | $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3 |
|
703 | 699 | $ hg -R clone id |
|
704 | 700 | 000000000000 |
|
705 | 701 | $ cat errors.log |
|
706 | 702 | $ cd .. |
|
707 | 703 | |
|
708 | 704 | Stream repository with bookmarks |
|
709 | 705 | -------------------------------- |
|
710 | 706 | |
|
711 | 707 | (revert the introduction of the secret changeset)
|
712 | 708 | |
|
713 | 709 | $ hg -R server phase --draft 'secret()' |
|
714 | 710 | |
|
715 | 711 | add a bookmark |
|
716 | 712 | |
|
717 | 713 | $ hg -R server bookmark -r tip some-bookmark |
|
718 | 714 | |
|
719 | 715 | clone it |
|
720 | 716 | |
|
721 | 717 | #if stream-legacy |
|
722 | 718 | $ hg clone --stream http://localhost:$HGPORT with-bookmarks |
|
723 | 719 | streaming all changes |
|
724 | 720 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
725 | 721 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
726 | 722 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
727 | 723 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
728 | 724 | searching for changes |
|
729 | 725 | no changes found |
|
730 | 726 | updating to branch default |
|
731 | 727 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
732 | 728 | #endif |
|
733 | 729 | #if stream-bundle2 |
|
734 | 730 | $ hg clone --stream http://localhost:$HGPORT with-bookmarks |
|
735 | 731 | streaming all changes |
|
736 | 732 | 1096 files to transfer, 102 KB of data (no-zstd !) |
|
737 | 733 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
738 | 734 | 1096 files to transfer, 99.1 KB of data (zstd !) |
|
739 | 735 | transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) |
|
740 | 736 | updating to branch default |
|
741 | 737 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
742 | 738 | #endif |
|
743 | 739 | $ hg verify -R with-bookmarks |
|
744 | 740 | checking changesets |
|
745 | 741 | checking manifests |
|
746 | 742 | crosschecking files in changesets and manifests |
|
747 | 743 | checking files |
|
748 | 744 | checked 3 changesets with 1088 changes to 1088 files |
|
749 | 745 | $ hg -R with-bookmarks bookmarks |
|
750 | 746 | some-bookmark 2:5223b5e3265f |
|
751 | 747 | |
|
752 | 748 | Stream repository with phases |
|
753 | 749 | ----------------------------- |
|
754 | 750 | |
|
755 | 751 | Clone as publishing |
|
756 | 752 | |
|
757 | 753 | $ hg -R server phase -r 'all()' |
|
758 | 754 | 0: draft |
|
759 | 755 | 1: draft |
|
760 | 756 | 2: draft |
|
761 | 757 | |
|
762 | 758 | #if stream-legacy |
|
763 | 759 | $ hg clone --stream http://localhost:$HGPORT phase-publish |
|
764 | 760 | streaming all changes |
|
765 | 761 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
766 | 762 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
767 | 763 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
768 | 764 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
769 | 765 | searching for changes |
|
770 | 766 | no changes found |
|
771 | 767 | updating to branch default |
|
772 | 768 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
773 | 769 | #endif |
|
774 | 770 | #if stream-bundle2 |
|
775 | 771 | $ hg clone --stream http://localhost:$HGPORT phase-publish |
|
776 | 772 | streaming all changes |
|
777 | 773 | 1096 files to transfer, 102 KB of data (no-zstd !) |
|
778 | 774 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
779 | 775 | 1096 files to transfer, 99.1 KB of data (zstd !) |
|
780 | 776 | transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) |
|
781 | 777 | updating to branch default |
|
782 | 778 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
783 | 779 | #endif |
|
784 | 780 | $ hg verify -R phase-publish |
|
785 | 781 | checking changesets |
|
786 | 782 | checking manifests |
|
787 | 783 | crosschecking files in changesets and manifests |
|
788 | 784 | checking files |
|
789 | 785 | checked 3 changesets with 1088 changes to 1088 files |
|
790 | 786 | $ hg -R phase-publish phase -r 'all()' |
|
791 | 787 | 0: public |
|
792 | 788 | 1: public |
|
793 | 789 | 2: public |
|
794 | 790 | |
|
795 | 791 | Clone as non publishing |
|
796 | 792 | |
|
797 | 793 | $ cat << EOF >> server/.hg/hgrc |
|
798 | 794 | > [phases] |
|
799 | 795 | > publish = False |
|
800 | 796 | > EOF |
|
801 | 797 | $ killdaemons.py |
|
802 | 798 | $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid |
|
803 | 799 | $ cat hg.pid > $DAEMON_PIDS |
|
804 | 800 | |
|
805 | 801 | #if stream-legacy |
|
806 | 802 | |
|
807 | 803 | With v1 of the stream protocol, changesets are always cloned as public. This makes

808 | 804 | stream v1 unsuitable for non-publishing repositories.
|
809 | 805 | |
|
810 | 806 | $ hg clone --stream http://localhost:$HGPORT phase-no-publish |
|
811 | 807 | streaming all changes |
|
812 | 808 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
813 | 809 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
814 | 810 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
815 | 811 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
816 | 812 | searching for changes |
|
817 | 813 | no changes found |
|
818 | 814 | updating to branch default |
|
819 | 815 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
820 | 816 | $ hg -R phase-no-publish phase -r 'all()' |
|
821 | 817 | 0: public |
|
822 | 818 | 1: public |
|
823 | 819 | 2: public |
|
824 | 820 | #endif |
|
825 | 821 | #if stream-bundle2 |
|
826 | 822 | $ hg clone --stream http://localhost:$HGPORT phase-no-publish |
|
827 | 823 | streaming all changes |
|
828 | 824 | 1097 files to transfer, 102 KB of data (no-zstd !) |
|
829 | 825 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
830 | 826 | 1097 files to transfer, 99.1 KB of data (zstd !) |
|
831 | 827 | transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) |
|
832 | 828 | updating to branch default |
|
833 | 829 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
834 | 830 | $ hg -R phase-no-publish phase -r 'all()' |
|
835 | 831 | 0: draft |
|
836 | 832 | 1: draft |
|
837 | 833 | 2: draft |
|
838 | 834 | #endif |
|
839 | 835 | $ hg verify -R phase-no-publish |
|
840 | 836 | checking changesets |
|
841 | 837 | checking manifests |
|
842 | 838 | crosschecking files in changesets and manifests |
|
843 | 839 | checking files |
|
844 | 840 | checked 3 changesets with 1088 changes to 1088 files |
|
845 | 841 | |
|
846 | 842 | $ killdaemons.py |
|
847 | 843 | |
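The two backends disagree above because stream v1 rebuilds the repository and lets the receiving side assign phases (everything becomes public), while stream v2 copies the store byte-for-byte, including the phaseroots file that records draft roots. A quick, assumption-level way to see this from the clones created above (the phaseroots line format of "<phase> <hex-node>" is assumed from current Mercurial):

    import os

    for clone in ('phase-publish', 'phase-no-publish'):
        path = os.path.join(clone, '.hg', 'store', 'phaseroots')
        if os.path.exists(path):
            with open(path, 'rb') as fh:
                print(clone, fh.read())  # b'<phase> <hex-node>\n' per root
        else:
            print(clone, 'has no phaseroots; all changesets are public')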
|
848 | 844 | #if stream-legacy |
|
849 | 845 | |
|
850 | 846 | With v1 of the stream protocol, changesets are always cloned as public. There is

851 | 847 | no obsolescence marker exchange in stream v1.
|
852 | 848 | |
|
853 | 849 | #endif |
|
854 | 850 | #if stream-bundle2 |
|
855 | 851 | |
|
856 | 852 | Stream repository with obsolescence |
|
857 | 853 | ----------------------------------- |
|
858 | 854 | |
|
859 | 855 | Clone non-publishing with obsolescence |
|
860 | 856 | |
|
861 | 857 | $ cat >> $HGRCPATH << EOF |
|
862 | 858 | > [experimental] |
|
863 | 859 | > evolution=all |
|
864 | 860 | > EOF |
|
865 | 861 | |
|
866 | 862 | $ cd server |
|
867 | 863 | $ echo foo > foo |
|
868 | 864 | $ hg -q commit -m 'about to be pruned' |
|
869 | 865 | $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents |
|
870 | 866 | 1 new obsolescence markers |
|
871 | 867 | obsoleted 1 changesets |
|
872 | 868 | $ hg up null -q |
|
873 | 869 | $ hg log -T '{rev}: {phase}\n' |
|
874 | 870 | 2: draft |
|
875 | 871 | 1: draft |
|
876 | 872 | 0: draft |
|
877 | 873 | $ hg serve -p $HGPORT -d --pid-file=hg.pid |
|
878 | 874 | $ cat hg.pid > $DAEMON_PIDS |
|
879 | 875 | $ cd .. |
|
880 | 876 | |
|
881 | 877 | $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence |
|
882 | 878 | streaming all changes |
|
883 | 879 | 1098 files to transfer, 102 KB of data (no-zstd !) |
|
884 | 880 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
885 | 881 | 1098 files to transfer, 99.5 KB of data (zstd !) |
|
886 | 882 | transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !) |
|
887 | 883 | $ hg -R with-obsolescence log -T '{rev}: {phase}\n' |
|
888 | 884 | 2: draft |
|
889 | 885 | 1: draft |
|
890 | 886 | 0: draft |
|
891 | 887 | $ hg debugobsolete -R with-obsolescence |
|
892 | 888 | 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'} |
|
893 | 889 | $ hg verify -R with-obsolescence |
|
894 | 890 | checking changesets |
|
895 | 891 | checking manifests |
|
896 | 892 | crosschecking files in changesets and manifests |
|
897 | 893 | checking files |
|
898 | 894 | checked 4 changesets with 1089 changes to 1088 files |
|
899 | 895 | |
|
900 | 896 | $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution |
|
901 | 897 | streaming all changes |
|
902 | 898 | remote: abort: server has obsolescence markers, but client cannot receive them via stream clone |
|
903 | 899 | abort: pull failed on remote |
|
904 | 900 | [100] |
|
905 | 901 | |
|
906 | 902 | $ killdaemons.py |
|
907 | 903 | |
|
908 | 904 | #endif |