##// END OF EJS Templates
streamclone: remove sleep based "synchronisation" in tests...
marmoute -
r47748:faa43f09 default
parent child Browse files
Show More
@@ -0,0 +1,31 b''
1 from __future__ import absolute_import
2
3 from mercurial import (
4 encoding,
5 extensions,
6 streamclone,
7 testing,
8 )
9
10
# Paths of the two marker files used to synchronise the test driver with a
# stream clone in progress; both must be set in the test environment.
WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
13
14
def _test_sync_point_walk_1(orig, repo):
    # Replaces streamclone._test_sync_point_walk_1: create the first marker
    # file so the test driver knows this sync point has been reached.
    testing.write_file(WALKED_FILE_1)
17
18
def _test_sync_point_walk_2(orig, repo):
    # Replaces streamclone._test_sync_point_walk_2. The repository lock must
    # already have been released by the time this sync point runs; then block
    # until the test driver creates the second marker file.
    assert repo._currentlock(repo._lockref) is None
    testing.wait_file(WALKED_FILE_2)
22
23
def uisetup(ui):
    """Install the test synchronisation hooks into ``streamclone``."""
    overrides = [
        ('_test_sync_point_walk_1', _test_sync_point_walk_1),
        ('_test_sync_point_walk_2', _test_sync_point_walk_2),
    ]
    for target, replacement in overrides:
        extensions.wrapfunction(streamclone, target, replacement)
@@ -1,735 +1,747 b''
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import os
12 12 import struct
13 13
14 14 from .i18n import _
15 15 from .pycompat import open
16 16 from .interfaces import repository
17 17 from . import (
18 18 cacheutil,
19 19 error,
20 20 narrowspec,
21 21 phases,
22 22 pycompat,
23 23 requirements as requirementsmod,
24 24 scmutil,
25 25 store,
26 26 util,
27 27 )
28 28
29 29
def canperformstreamclone(pullop, bundle2=False):
    """Whether it is possible to perform a streaming clone as part of pull.

    ``bundle2`` will cause the function to consider stream clone through
    bundle2 and only through bundle2.

    Returns a tuple of (supported, requirements). ``supported`` is True if
    streaming clone is supported and False otherwise. ``requirements`` is
    a set of repo requirements from the remote, or ``None`` if stream clone
    isn't supported.
    """
    repo = pullop.repo
    remote = pullop.remote

    bundle2supported = False
    if pullop.canusebundle2:
        if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
            bundle2supported = True
        # else
        #   Server doesn't support bundle2 stream clone or doesn't support
        #   the versions we support. Fall back and possibly allow legacy.

    # Ensures legacy code path uses available bundle2.
    if bundle2supported and not bundle2:
        return False, None
    # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
    elif bundle2 and not bundle2supported:
        return False, None

    # Streaming clone only works on empty repositories.
    if len(repo):
        return False, None

    # Streaming clone only works if all data is being requested.
    if pullop.heads:
        return False, None

    streamrequested = pullop.streamclonerequested

    # If we don't have a preference, let the server decide for us. This
    # likely only comes into play in LANs.
    if streamrequested is None:
        # The server can advertise whether to prefer streaming clone.
        streamrequested = remote.capable(b'stream-preferred')

    if not streamrequested:
        return False, None

    # In order for stream clone to work, the client has to support all the
    # requirements advertised by the server.
    #
    # The server advertises its requirements via the "stream" and "streamreqs"
    # capability. "stream" (a value-less capability) is advertised if and only
    # if the only requirement is "revlogv1." Else, the "streamreqs" capability
    # is advertised and contains a comma-delimited list of requirements.
    requirements = set()
    if remote.capable(b'stream'):
        requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
    else:
        streamreqs = remote.capable(b'streamreqs')
        # This is weird and shouldn't happen with modern servers.
        if not streamreqs:
            pullop.repo.ui.warn(
                _(
                    b'warning: stream clone requested but server has them '
                    b'disabled\n'
                )
            )
            return False, None

        streamreqs = set(streamreqs.split(b','))
        # Server requires something we don't support. Bail.
        missingreqs = streamreqs - repo.supportedformats
        if missingreqs:
            pullop.repo.ui.warn(
                _(
                    b'warning: stream clone requested but client is missing '
                    b'requirements: %s\n'
                )
                % b', '.join(sorted(missingreqs))
            )
            pullop.repo.ui.warn(
                _(
                    b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                    b'for more information)\n'
                )
            )
            return False, None
        requirements = streamreqs

    return True, requirements
122 122
def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    # imported here to avoid an import cycle at module load time
    from . import localrepo

    supported, requirements = canperformstreamclone(pullop)

    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable(b'branchmap'):
        with remote.commandexecutor() as e:
            rbranchmap = e.callcommand(b'branchmap', {}).result()

    repo.ui.status(_(b'streaming all changes\n'))

    with remote.commandexecutor() as e:
        fp = e.callcommand(b'stream_out', {}).result()

    # TODO strictly speaking, this code should all be inside the context
    # manager because the context manager is supposed to ensure all wire state
    # is flushed when exiting. But the legacy peers don't do this, so it
    # doesn't matter.

    # First line of the response is a status code (see generatev1wireproto).
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _(b'unexpected response from remote server:'), l
        )
    if resp == 1:
        raise error.Abort(_(b'operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_(b'locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_(b'the server sent an unknown error code'))

    # Second line carries "<filecount> <bytecount>".
    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(b' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _(b'unexpected response from remote server:'), l
        )

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = requirements | (
            repo.requirements - repo.supportedformats
        )
        repo.svfs.options = localrepo.resolvestorevfsoptions(
            repo.ui, repo.requirements, repo.features
        )
        scmutil.writereporequirements(repo)

        if rbranchmap:
            repo._branchcaches.replace(repo, rbranchmap)

        repo.invalidate()
199 199
def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
        return False

    if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret
    # changesets, so when any are present require an explicit opt-in.
    if phases.hassecret(repo):
        return repo.ui.configbool(b'server', b'uncompressedallowsecret')

    return True
216 216
217 217 # This is it's own function so extensions can override it.
218 218 def _walkstreamfiles(repo, matcher=None):
219 219 return repo.store.walk(matcher)
220 220
221 221
def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of N entries for each file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug(b'scanning\n')
        for file_type, name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size
    # Test hooks; by this point the lock has been released (the test
    # extension asserts no lock is held in the second sync point).
    _test_sync_point_walk_1(repo)
    _test_sync_point_walk_2(repo)

    repo.ui.debug(
        b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
    )

    svfs = repo.svfs
    debugflag = repo.ui.debugflag

    def emitrevlogdata():
        # Lazily stream each scanned file: a "<name>\0<size>\n" header line
        # followed by the raw file bytes.
        for name, size in entries:
            if debugflag:
                repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield b'%s\0%d\n' % (store.encodedir(name), size)
            # auditing at this stage is both pointless (paths are already
            # trusted by the local repo) and expensive
            with svfs(name, b'rb', auditpath=False) as fp:
                if size <= 65536:
                    yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(fp, limit=size):
                        yield chunk

    return len(entries), total_bytes, emitrevlogdata()
274 276
275 277
def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with 2 header lines. The
    first line indicates overall success. The 2nd contains the file count and
    byte size of payload.

    The success line contains "0" for success, "1" for stream generation not
    allowed, and "2" for error locking the repository (possibly indicating
    a permissions error for the server process).
    """
    if not allowservergeneration(repo):
        # Stream generation refused by server policy.
        yield b'1\n'
        return

    try:
        filecount, bytecount, it = generatev1(repo)
    except error.LockError:
        # Could not lock the repository for a consistent snapshot.
        yield b'2\n'
        return

    # Indicates successful response.
    yield b'0\n'
    yield b'%d %d\n' % (filecount, bytecount)
    for piece in it:
        yield piece
302 304
303 305
def generatebundlev1(repo, compression=b'UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes is a 16-bit big endian unsigned short declaring the length
    of the requirements string, including a trailing \0. The following N bytes
    are the requirements string, which is ASCII containing a comma-delimited
    list of repo requirements that are needed to support the data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != b'UN':
        raise ValueError(b'we do not support the compression argument yet')

    requirements = repo.requirements & repo.supportedformats
    requires = b','.join(sorted(requirements))

    def gen():
        # Magic + compression identifier.
        yield b'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(
            _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
        )

        # Fixed-width header: counts, then length-prefixed requirements.
        yield struct.pack(b'>QQ', filecount, bytecount)
        yield struct.pack(b'>H', len(requires) + 1)
        yield requires + b'\0'

        # This is where we'll add compression in the future.
        assert compression == b'UN'

        progress = repo.ui.makeprogress(
            _(b'bundle'), total=bytecount, unit=_(b'bytes')
        )
        progress.update(0)

        for chunk in it:
            progress.increment(step=len(chunk))
            yield chunk

        progress.complete()

    return requirements, gen()
360 362
361 363
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(
            _(b'%d files to transfer, %s of data\n')
            % (filecount, util.bytecount(bytecount))
        )
        progress = repo.ui.makeprogress(
            _(b'clone'), total=bytecount, unit=_(b'bytes')
        )
        progress.update(0)
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction(b'clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                for i in pycompat.xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split(b'\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _(b'unexpected response from remote server:'), l
                        )
                    if repo.ui.debugflag:
                        repo.ui.debug(
                            b'adding %s (%s)\n' % (name, util.bytecount(size))
                        )
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, b'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

            # force @filecache properties to be reloaded from
            # streamclone-ed file at next access
            repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            # avoid division by zero in the rate computation below
            elapsed = 0.001
        progress.complete()
        repo.ui.status(
            _(b'transferred %s in %.1f seconds (%s/sec)\n')
            % (
                util.bytecount(bytecount),
                elapsed,
                util.bytecount(bytecount / elapsed),
            )
        )
435 437
436 438
def readbundle1header(fp):
    """Parse the header of a v1 stream clone bundle from ``fp``.

    The caller must already have consumed the 4 byte "HGS1" magic; ``fp``
    is positioned at the 2 byte compression identifier.

    Returns a (filecount, bytecount, requirements) tuple.
    """
    magic = fp.read(2)
    if magic != b'UN':
        raise error.Abort(
            _(
                b'only uncompressed stream clone bundles are '
                b'supported; got %s'
            )
            % magic
        )

    # Two 64-bit big endian counts, then a length-prefixed requirements blob.
    filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
    (requireslen,) = struct.unpack(b'>H', fp.read(2))
    rawrequires = fp.read(requireslen)

    if not rawrequires.endswith(b'\0'):
        raise error.Abort(
            _(
                b'malformed stream clone bundle: '
                b'requirements not properly encoded'
            )
        )

    requirements = set(rawrequires.rstrip(b'\0').split(b','))

    return filecount, bytecount, requirements
463 465
464 466
def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    # Streamed files are written straight into the store, so refuse to
    # clobber an existing repository.
    if len(repo):
        raise error.Abort(
            _(b'cannot apply stream clone bundle on non-empty repo')
        )

    filecount, bytecount, requirements = readbundle1header(fp)
    unsupported = requirements - repo.supportedformats
    if unsupported:
        raise error.Abort(
            _(b'unable to apply stream clone: unsupported format: %s')
            % b', '.join(sorted(unsupported))
        )

    consumev1(repo, fp, filecount, bytecount)
485 487
486 488
class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.

    Wrapping ``applybundlev1()`` in a dedicated type lets bundle readers
    perform bundle type-specific functionality.
    """

    def __init__(self, fh):
        # file handle positioned at the compression identifier
        self._fh = fh

    def apply(self, repo):
        """Apply the wrapped bundle to ``repo``."""
        return applybundlev1(repo, self._fh)
499 501
500 502
# Each v2 stream entry is tagged with how the file may be read and where it
# lives, using the constants below.

# type of file to stream
_fileappend = 0  # append only file
_filefull = 1  # full snapshot file

# Source of the file
_srcstore = b's'  # store (svfs)
_srccache = b'c'  # cache (cache)
508 510
509 511 # This is it's own function so extensions can override it.
510 512 def _walkstreamfullstorefiles(repo):
511 513 """list snapshot file from the store"""
512 514 fnames = []
513 515 if not repo.publishing():
514 516 fnames.append(b'phaseroots')
515 517 return fnames
516 518
517 519
def _filterfull(entry, copy, vfsmap):
    """Snapshot a full-file entry's payload via ``copy``.

    Append-only entries are returned untouched; full-snapshot entries get
    their data replaced by the path of a temporary copy made now, while the
    repository is still locked.
    """
    src, name, ftype, data = entry
    if ftype != _filefull:
        return entry
    snapshot = copy(vfsmap[src].join(name))
    return (src, name, ftype, snapshot)
524 526
525 527
@contextlib.contextmanager
def maketempcopies():
    """Yield a function that hardlink-copies a file to a temp location.

    Every copy made through the yielded function is removed when the
    context exits.
    """
    created = []
    try:

        def makecopy(src):
            fd, dst = pycompat.mkstemp()
            os.close(fd)
            created.append(dst)
            util.copyfiles(src, dst, hardlink=True)
            return dst

        yield makecopy
    finally:
        # best-effort cleanup of every temporary copy
        for path in created:
            util.tryunlink(path)
543 545
544 546
def _makemap(repo):
    """Build the (source byte -> vfs) map used by the v2 stream format."""
    vfsmap = {
        _srcstore: repo.svfs,
        _srccache: repo.cachevfs,
    }
    # repo.vfs is deliberately left out: it exposes too many dangerous
    # files (eg: .hg/hgrc)
    assert repo.vfs not in vfsmap.values()

    return vfsmap
556 558
557 559
def _emit2(repo, entries, totalfilesize):
    """actually emit the stream bundle

    The generator first yields ``None`` once the full-snapshot files have
    been copied aside; the caller treats that as the signal that the
    repository lock may be released. Subsequent yields are the wire payload.
    """
    vfsmap = _makemap(repo)
    progress = repo.ui.makeprogress(
        _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
    )
    progress.update(0)
    with maketempcopies() as copy, progress:
        # copy is delayed until we are in the try
        entries = [_filterfull(e, copy, vfsmap) for e in entries]
        yield None  # this release the lock on the repository
        seen = 0

        for src, name, ftype, data in entries:
            vfs = vfsmap[src]
            # wire format: source byte, varint name length, varint data
            # length, name bytes, data bytes
            yield src
            yield util.uvarintencode(len(name))
            if ftype == _fileappend:
                fp = vfs(name)
                size = data
            elif ftype == _filefull:
                fp = open(data, b'rb')
                size = util.fstat(fp).st_size
            try:
                yield util.uvarintencode(size)
                yield name
                if size <= 65536:
                    chunks = (fp.read(size),)
                else:
                    chunks = util.filechunkiter(fp, limit=size)
                for chunk in chunks:
                    seen += len(chunk)
                    progress.update(seen)
                    yield chunk
            finally:
                fp.close()
594 596
595 597
def _test_sync_point_walk_1(repo):
    """a function for synchronisation during tests

    No-op by default; tests wrap it (via extensions.wrapfunction) to
    coordinate with a stream clone in progress.
    """
600
601
def _test_sync_point_walk_2(repo):
    """a function for synchronisation during tests

    No-op by default; tests wrap it (via extensions.wrapfunction) to
    coordinate with a stream clone in progress.
    """
604
605
def generatev2(repo, includes, excludes, includeobsmarkers):
    """Emit content for version 2 of a streaming clone.

    the data stream consists the following entries:
    1) A char representing the file destination (eg: store or cache)
    2) A varint containing the length of the filename
    3) A varint containing the length of file data
    4) N bytes containing the filename (the internal, store-agnostic form)
    5) N bytes containing the file data

    Returns a 3-tuple of (file count, file size, data iterator).
    """

    with repo.lock():

        entries = []
        totalfilesize = 0

        matcher = None
        if includes or excludes:
            matcher = narrowspec.match(repo.root, includes, excludes)

        repo.ui.debug(b'scanning\n')
        # append-only revlog data from the store
        for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
            if size:
                entries.append((_srcstore, name, _fileappend, size))
                totalfilesize += size
        # full-snapshot store files (eg: phaseroots)
        for name in _walkstreamfullstorefiles(repo):
            if repo.svfs.exists(name):
                totalfilesize += repo.svfs.lstat(name).st_size
                entries.append((_srcstore, name, _filefull, None))
        if includeobsmarkers and repo.svfs.exists(b'obsstore'):
            totalfilesize += repo.svfs.lstat(b'obsstore').st_size
            entries.append((_srcstore, b'obsstore', _filefull, None))
        for name in cacheutil.cachetocopy(repo):
            if repo.cachevfs.exists(name):
                totalfilesize += repo.cachevfs.lstat(name).st_size
                entries.append((_srccache, name, _filefull, None))

        chunks = _emit2(repo, entries, totalfilesize)
        first = next(chunks)
        # _emit2 yields None once snapshots have been copied aside and the
        # lock is no longer needed; the payload is streamed lazily later.
        assert first is None
    # Test hooks; by this point the lock has been released (the test
    # extension asserts no lock is held in the second sync point).
    _test_sync_point_walk_1(repo)
    _test_sync_point_walk_2(repo)

    return len(entries), totalfilesize, chunks
640 652
641 653
@contextlib.contextmanager
def nested(*ctxs):
    """Enter all of ``ctxs`` (at least one), innermost last.

    Contexts are exited in reverse order when the managed block finishes.
    """
    head, tail = ctxs[0], ctxs[1:]
    with head:
        if not tail:
            yield
        else:
            with nested(*tail):
                yield
652 664
653 665
def consumev2(repo, fp, filecount, filesize):
    """Apply the contents from a version 2 streaming clone.

    Data is read from an object that only needs to provide a ``read(size)``
    method.
    """
    with repo.lock():
        repo.ui.status(
            _(b'%d files to transfer, %s of data\n')
            % (filecount, util.bytecount(filesize))
        )

        start = util.timer()
        progress = repo.ui.makeprogress(
            _(b'clone'), total=filesize, unit=_(b'bytes')
        )
        progress.update(0)

        vfsmap = _makemap(repo)

        with repo.transaction(b'clone'):
            ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
            with nested(*ctxs):
                for i in range(filecount):
                    # entry format: source byte, varint name length, varint
                    # data length, name bytes, data bytes (see _emit2)
                    src = util.readexactly(fp, 1)
                    vfs = vfsmap[src]
                    namelen = util.uvarintdecodestream(fp)
                    datalen = util.uvarintdecodestream(fp)

                    name = util.readexactly(fp, namelen)

                    if repo.ui.debugflag:
                        repo.ui.debug(
                            b'adding [%s] %s (%s)\n'
                            % (src, name, util.bytecount(datalen))
                        )

                    with vfs(name, b'w') as ofp:
                        for chunk in util.filechunkiter(fp, limit=datalen):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

            # force @filecache properties to be reloaded from
            # streamclone-ed file at next access
            repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            # avoid division by zero in the rate computation below
            elapsed = 0.001
        repo.ui.status(
            _(b'transferred %s in %.1f seconds (%s/sec)\n')
            % (
                util.bytecount(progress.pos),
                elapsed,
                util.bytecount(progress.pos / elapsed),
            )
        )
        progress.complete()
712 724
713 725
def applybundlev2(repo, fp, filecount, filesize, requirements):
    """Apply a v2 stream clone bundle and update the repo requirements."""
    # imported here to avoid an import cycle at module load time
    from . import localrepo

    unsupported = [r for r in requirements if r not in repo.supported]
    if unsupported:
        raise error.Abort(
            _(b'unable to apply stream clone: unsupported format: %s')
            % b', '.join(sorted(unsupported))
        )

    consumev2(repo, fp, filecount, filesize)

    # new requirements = old non-format requirements +
    #                    new format-related remote requirements
    # requirements from the streamed-in repository
    repo.requirements = set(requirements) | (
        repo.requirements - repo.supportedformats
    )
    repo.svfs.options = localrepo.resolvestorevfsoptions(
        repo.ui, repo.requirements, repo.features
    )
    scmutil.writereporequirements(repo)
@@ -1,638 +1,671 b''
1 1 #require serve no-reposimplestore no-chg
2 2
3 3 #testcases stream-legacy stream-bundle2
4 4
5 5 #if stream-legacy
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [server]
8 8 > bundle2.stream = no
9 9 > EOF
10 10 #endif
11 11
12 12 Initialize repository
13 13 the status call is to check for issue5130
14 14
15 15 $ hg init server
16 16 $ cd server
17 17 $ touch foo
18 18 $ hg -q commit -A -m initial
19 19 >>> for i in range(1024):
20 20 ... with open(str(i), 'wb') as fh:
21 21 ... fh.write(b"%d" % i) and None
22 22 $ hg -q commit -A -m 'add a lot of files'
23 23 $ hg st
24 24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
25 25 $ cat hg.pid > $DAEMON_PIDS
26 26 $ cd ..
27 27
28 28 Cannot stream clone when server.uncompressed is set
29 29
30 30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
31 31 200 Script output follows
32 32
33 33 1
34 34
35 35 #if stream-legacy
36 36 $ hg debugcapabilities http://localhost:$HGPORT
37 37 Main capabilities:
38 38 batch
39 39 branchmap
40 40 $USUAL_BUNDLE2_CAPS_SERVER$
41 41 changegroupsubset
42 42 compression=$BUNDLE2_COMPRESSIONS$
43 43 getbundle
44 44 httpheader=1024
45 45 httpmediatype=0.1rx,0.1tx,0.2tx
46 46 known
47 47 lookup
48 48 pushkey
49 49 unbundle=HG10GZ,HG10BZ,HG10UN
50 50 unbundlehash
51 51 Bundle2 capabilities:
52 52 HG20
53 53 bookmarks
54 54 changegroup
55 55 01
56 56 02
57 57 checkheads
58 58 related
59 59 digests
60 60 md5
61 61 sha1
62 62 sha512
63 63 error
64 64 abort
65 65 unsupportedcontent
66 66 pushraced
67 67 pushkey
68 68 hgtagsfnodes
69 69 listkeys
70 70 phases
71 71 heads
72 72 pushkey
73 73 remote-changegroup
74 74 http
75 75 https
76 76
77 77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
78 78 warning: stream clone requested but server has them disabled
79 79 requesting all changes
80 80 adding changesets
81 81 adding manifests
82 82 adding file changes
83 83 added 2 changesets with 1025 changes to 1025 files
84 84 new changesets 96ee1d7354c4:c17445101a72
85 85
86 86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
87 87 200 Script output follows
88 88 content-type: application/mercurial-0.2
89 89
90 90
91 91 $ f --size body --hexdump --bytes 100
92 92 body: size=232
93 93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
94 94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
95 95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
96 96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
97 97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
98 98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
99 99 0060: 69 73 20 66 |is f|
100 100
101 101 #endif
102 102 #if stream-bundle2
103 103 $ hg debugcapabilities http://localhost:$HGPORT
104 104 Main capabilities:
105 105 batch
106 106 branchmap
107 107 $USUAL_BUNDLE2_CAPS_SERVER$
108 108 changegroupsubset
109 109 compression=$BUNDLE2_COMPRESSIONS$
110 110 getbundle
111 111 httpheader=1024
112 112 httpmediatype=0.1rx,0.1tx,0.2tx
113 113 known
114 114 lookup
115 115 pushkey
116 116 unbundle=HG10GZ,HG10BZ,HG10UN
117 117 unbundlehash
118 118 Bundle2 capabilities:
119 119 HG20
120 120 bookmarks
121 121 changegroup
122 122 01
123 123 02
124 124 checkheads
125 125 related
126 126 digests
127 127 md5
128 128 sha1
129 129 sha512
130 130 error
131 131 abort
132 132 unsupportedcontent
133 133 pushraced
134 134 pushkey
135 135 hgtagsfnodes
136 136 listkeys
137 137 phases
138 138 heads
139 139 pushkey
140 140 remote-changegroup
141 141 http
142 142 https
143 143
144 144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
145 145 warning: stream clone requested but server has them disabled
146 146 requesting all changes
147 147 adding changesets
148 148 adding manifests
149 149 adding file changes
150 150 added 2 changesets with 1025 changes to 1025 files
151 151 new changesets 96ee1d7354c4:c17445101a72
152 152
153 153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
154 154 200 Script output follows
155 155 content-type: application/mercurial-0.2
156 156
157 157
158 158 $ f --size body --hexdump --bytes 100
159 159 body: size=232
160 160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
161 161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
162 162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
163 163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
164 164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
165 165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
166 166 0060: 69 73 20 66 |is f|
167 167
168 168 #endif
169 169
170 170 $ killdaemons.py
171 171 $ cd server
172 172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
173 173 $ cat hg.pid > $DAEMON_PIDS
174 174 $ cd ..
175 175
176 176 Basic clone
177 177
178 178 #if stream-legacy
179 179 $ hg clone --stream -U http://localhost:$HGPORT clone1
180 180 streaming all changes
181 181 1027 files to transfer, 96.3 KB of data (no-zstd !)
182 182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
183 183 1027 files to transfer, 93.5 KB of data (zstd !)
184 184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
185 185 searching for changes
186 186 no changes found
187 187 $ cat server/errors.txt
188 188 #endif
189 189 #if stream-bundle2
190 190 $ hg clone --stream -U http://localhost:$HGPORT clone1
191 191 streaming all changes
192 192 1030 files to transfer, 96.5 KB of data (no-zstd !)
193 193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
194 194 1030 files to transfer, 93.6 KB of data (zstd !)
195 195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
196 196
197 197 $ ls -1 clone1/.hg/cache
198 198 branch2-base
199 199 branch2-immutable
200 200 branch2-served
201 201 branch2-served.hidden
202 202 branch2-visible
203 203 branch2-visible-hidden
204 204 hgtagsfnodes1
205 205 rbc-names-v1
206 206 rbc-revs-v1
207 207 tags2
208 208 tags2-served
209 209 $ cat server/errors.txt
210 210 #endif
211 211
212 212 getbundle requests with stream=1 are uncompressed
213 213
214 214 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
215 215 200 Script output follows
216 216 content-type: application/mercurial-0.2
217 217
218 218
219 219 $ f --size --hex --bytes 256 body
220 220 body: size=112262 (no-zstd !)
221 221 body: size=109410 (zstd no-rust !)
222 222 body: size=109431 (rust !)
223 223 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
224 224 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
225 225 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
226 226 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
227 227 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
228 228 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
229 229 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
230 230 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
231 231 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
232 232 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
233 233 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
234 234 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
235 235 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
236 236 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
237 237 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
238 238 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
239 239 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
240 240 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
241 241 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
242 242 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
243 243 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
244 244 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
245 245 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
246 246 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
247 247 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
248 248 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
249 249 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
250 250 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
251 251 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
252 252 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
253 253 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
254 254 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
255 255 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
256 256 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
257 257 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
258 258 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
259 259 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
260 260 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
261 261 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
262 262
263 263 --uncompressed is an alias to --stream
264 264
265 265 #if stream-legacy
266 266 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
267 267 streaming all changes
268 268 1027 files to transfer, 96.3 KB of data (no-zstd !)
269 269 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
270 270 1027 files to transfer, 93.5 KB of data (zstd !)
271 271 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
272 272 searching for changes
273 273 no changes found
274 274 #endif
275 275 #if stream-bundle2
276 276 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
277 277 streaming all changes
278 278 1030 files to transfer, 96.5 KB of data (no-zstd !)
279 279 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
280 280 1030 files to transfer, 93.6 KB of data (zstd !)
281 281 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
282 282 #endif
283 283
284 284 Clone with background file closing enabled
285 285
286 286 #if stream-legacy
287 287 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
288 288 using http://localhost:$HGPORT/
289 289 sending capabilities command
290 290 sending branchmap command
291 291 streaming all changes
292 292 sending stream_out command
293 293 1027 files to transfer, 96.3 KB of data (no-zstd !)
294 294 1027 files to transfer, 93.5 KB of data (zstd !)
295 295 starting 4 threads for background file closing
296 296 updating the branch cache
297 297 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
298 298 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
299 299 query 1; heads
300 300 sending batch command
301 301 searching for changes
302 302 all remote heads known locally
303 303 no changes found
304 304 sending getbundle command
305 305 bundle2-input-bundle: with-transaction
306 306 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
307 307 bundle2-input-part: "phase-heads" supported
308 308 bundle2-input-part: total payload size 24
309 309 bundle2-input-bundle: 2 parts total
310 310 checking for updated bookmarks
311 311 updating the branch cache
312 312 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
313 313 #endif
314 314 #if stream-bundle2
315 315 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
316 316 using http://localhost:$HGPORT/
317 317 sending capabilities command
318 318 query 1; heads
319 319 sending batch command
320 320 streaming all changes
321 321 sending getbundle command
322 322 bundle2-input-bundle: with-transaction
323 323 bundle2-input-part: "stream2" (params: 3 mandatory) supported
324 324 applying stream bundle
325 325 1030 files to transfer, 96.5 KB of data (no-zstd !)
326 326 1030 files to transfer, 93.6 KB of data (zstd !)
327 327 starting 4 threads for background file closing
328 328 starting 4 threads for background file closing
329 329 updating the branch cache
330 330 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
331 331 bundle2-input-part: total payload size 112094 (no-zstd !)
332 332 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
333 333 bundle2-input-part: total payload size 109216 (zstd !)
334 334 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
335 335 bundle2-input-bundle: 2 parts total
336 336 checking for updated bookmarks
337 337 updating the branch cache
338 338 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
339 339 #endif
340 340
341 341 Cannot stream clone when there are secret changesets
342 342
343 343 $ hg -R server phase --force --secret -r tip
344 344 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
345 345 warning: stream clone requested but server has them disabled
346 346 requesting all changes
347 347 adding changesets
348 348 adding manifests
349 349 adding file changes
350 350 added 1 changesets with 1 changes to 1 files
351 351 new changesets 96ee1d7354c4
352 352
353 353 $ killdaemons.py
354 354
355 355 Streaming of secrets can be overridden by server config
356 356
357 357 $ cd server
358 358 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
359 359 $ cat hg.pid > $DAEMON_PIDS
360 360 $ cd ..
361 361
362 362 #if stream-legacy
363 363 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
364 364 streaming all changes
365 365 1027 files to transfer, 96.3 KB of data (no-zstd !)
366 366 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
367 367 1027 files to transfer, 93.5 KB of data (zstd !)
368 368 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
369 369 searching for changes
370 370 no changes found
371 371 #endif
372 372 #if stream-bundle2
373 373 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
374 374 streaming all changes
375 375 1030 files to transfer, 96.5 KB of data (no-zstd !)
376 376 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
377 377 1030 files to transfer, 93.6 KB of data (zstd !)
378 378 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
379 379 #endif
380 380
381 381 $ killdaemons.py
382 382
383 383 Verify interaction between preferuncompressed and secret presence
384 384
385 385 $ cd server
386 386 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
387 387 $ cat hg.pid > $DAEMON_PIDS
388 388 $ cd ..
389 389
390 390 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
391 391 requesting all changes
392 392 adding changesets
393 393 adding manifests
394 394 adding file changes
395 395 added 1 changesets with 1 changes to 1 files
396 396 new changesets 96ee1d7354c4
397 397
398 398 $ killdaemons.py
399 399
400 400 Clone not allowed when full bundles disabled and can't serve secrets
401 401
402 402 $ cd server
403 403 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
404 404 $ cat hg.pid > $DAEMON_PIDS
405 405 $ cd ..
406 406
407 407 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
408 408 warning: stream clone requested but server has them disabled
409 409 requesting all changes
410 410 remote: abort: server has pull-based clones disabled
411 411 abort: pull failed on remote
412 412 (remove --pull if specified or upgrade Mercurial)
413 413 [100]
414 414
415 415 Local stream clone with secrets involved
416 416 (This is just a test over behavior: if you have access to the repo's files,
417 417 there is no security so it isn't important to prevent a clone here.)
418 418
419 419 $ hg clone -U --stream server local-secret
420 420 warning: stream clone requested but server has them disabled
421 421 requesting all changes
422 422 adding changesets
423 423 adding manifests
424 424 adding file changes
425 425 added 1 changesets with 1 changes to 1 files
426 426 new changesets 96ee1d7354c4
427 427
428 428 Stream clone while repo is changing:
429 429
430 430 $ mkdir changing
431 431 $ cd changing
432 432
433 433 extension for delaying the server process so we reliably can modify the repo
434 434 while cloning
435 435
436 $ cat > delayer.py <<EOF
437 > import time
438 > from mercurial import extensions, vfs
439 > def __call__(orig, self, path, *args, **kwargs):
440 > if path == 'data/f1.i':
441 > time.sleep(2)
442 > return orig(self, path, *args, **kwargs)
443 > extensions.wrapfunction(vfs.vfs, '__call__', __call__)
436 $ cat > stream_steps.py <<EOF
437 > import os
438 > import sys
439 > from mercurial import (
440 > encoding,
441 > extensions,
442 > streamclone,
443 > testing,
444 > )
445 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
446 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
447 >
448 > def _test_sync_point_walk_1(orig, repo):
449 > testing.write_file(WALKED_FILE_1)
450 >
451 > def _test_sync_point_walk_2(orig, repo):
452 > assert repo._currentlock(repo._lockref) is None
453 > testing.wait_file(WALKED_FILE_2)
454 >
455 > extensions.wrapfunction(
456 > streamclone,
457 > '_test_sync_point_walk_1',
458 > _test_sync_point_walk_1
459 > )
460 > extensions.wrapfunction(
461 > streamclone,
462 > '_test_sync_point_walk_2',
463 > _test_sync_point_walk_2
464 > )
444 465 > EOF
445 466
446 467 prepare repo with small and big file to cover both code paths in emitrevlogdata
447 468
448 469 $ hg init repo
449 470 $ touch repo/f1
450 471 $ $TESTDIR/seq.py 50000 > repo/f2
451 472 $ hg -R repo ci -Aqm "0"
452 $ hg serve -R repo -p $HGPORT1 -d --pid-file=hg.pid --config extensions.delayer=delayer.py
473 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
474 $ export HG_TEST_STREAM_WALKED_FILE_1
475 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
476 $ export HG_TEST_STREAM_WALKED_FILE_2
477 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
478 $ export HG_TEST_STREAM_WALKED_FILE_3
479 # $ cat << EOF >> $HGRCPATH
480 # > [hooks]
481 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
482 # > EOF
483 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
453 484 $ cat hg.pid >> $DAEMON_PIDS
454 485
455 486 clone while modifying the repo between stating file with write lock and
456 487 actually serving file content
457 488
458 $ hg clone -q --stream -U http://localhost:$HGPORT1 clone &
459 $ sleep 1
489 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
490 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
460 491 $ echo >> repo/f1
461 492 $ echo >> repo/f2
462 493 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
463 $ wait
494 $ touch $HG_TEST_STREAM_WALKED_FILE_2
495 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
464 496 $ hg -R clone id
465 497 000000000000
498 $ cat errors.log
466 499 $ cd ..
467 500
468 501 Stream repository with bookmarks
469 502 --------------------------------
470 503
471 504 (revert introduction of secret changeset)
472 505
473 506 $ hg -R server phase --draft 'secret()'
474 507
475 508 add a bookmark
476 509
477 510 $ hg -R server bookmark -r tip some-bookmark
478 511
479 512 clone it
480 513
481 514 #if stream-legacy
482 515 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
483 516 streaming all changes
484 517 1027 files to transfer, 96.3 KB of data (no-zstd !)
485 518 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
486 519 1027 files to transfer, 93.5 KB of data (zstd !)
487 520 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
488 521 searching for changes
489 522 no changes found
490 523 updating to branch default
491 524 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
492 525 #endif
493 526 #if stream-bundle2
494 527 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
495 528 streaming all changes
496 529 1033 files to transfer, 96.6 KB of data (no-zstd !)
497 530 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
498 531 1033 files to transfer, 93.8 KB of data (zstd !)
499 532 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
500 533 updating to branch default
501 534 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
502 535 #endif
503 536 $ hg -R with-bookmarks bookmarks
504 537 some-bookmark 1:c17445101a72
505 538
506 539 Stream repository with phases
507 540 -----------------------------
508 541
509 542 Clone as publishing
510 543
511 544 $ hg -R server phase -r 'all()'
512 545 0: draft
513 546 1: draft
514 547
515 548 #if stream-legacy
516 549 $ hg clone --stream http://localhost:$HGPORT phase-publish
517 550 streaming all changes
518 551 1027 files to transfer, 96.3 KB of data (no-zstd !)
519 552 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
520 553 1027 files to transfer, 93.5 KB of data (zstd !)
521 554 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
522 555 searching for changes
523 556 no changes found
524 557 updating to branch default
525 558 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
526 559 #endif
527 560 #if stream-bundle2
528 561 $ hg clone --stream http://localhost:$HGPORT phase-publish
529 562 streaming all changes
530 563 1033 files to transfer, 96.6 KB of data (no-zstd !)
531 564 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
532 565 1033 files to transfer, 93.8 KB of data (zstd !)
533 566 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
534 567 updating to branch default
535 568 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
536 569 #endif
537 570 $ hg -R phase-publish phase -r 'all()'
538 571 0: public
539 572 1: public
540 573
541 574 Clone as non publishing
542 575
543 576 $ cat << EOF >> server/.hg/hgrc
544 577 > [phases]
545 578 > publish = False
546 579 > EOF
547 580 $ killdaemons.py
548 581 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
549 582 $ cat hg.pid > $DAEMON_PIDS
550 583
551 584 #if stream-legacy
552 585
553 586 With v1 of the stream protocol, changeset are always cloned as public. It make
554 587 stream v1 unsuitable for non-publishing repository.
555 588
556 589 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
557 590 streaming all changes
558 591 1027 files to transfer, 96.3 KB of data (no-zstd !)
559 592 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
560 593 1027 files to transfer, 93.5 KB of data (zstd !)
561 594 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
562 595 searching for changes
563 596 no changes found
564 597 updating to branch default
565 598 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
566 599 $ hg -R phase-no-publish phase -r 'all()'
567 600 0: public
568 601 1: public
569 602 #endif
570 603 #if stream-bundle2
571 604 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
572 605 streaming all changes
573 606 1034 files to transfer, 96.7 KB of data (no-zstd !)
574 607 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
575 608 1034 files to transfer, 93.9 KB of data (zstd !)
576 609 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
577 610 updating to branch default
578 611 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
579 612 $ hg -R phase-no-publish phase -r 'all()'
580 613 0: draft
581 614 1: draft
582 615 #endif
583 616
584 617 $ killdaemons.py
585 618
586 619 #if stream-legacy
587 620
588 621 With v1 of the stream protocol, changeset are always cloned as public. There's
589 622 no obsolescence markers exchange in stream v1.
590 623
591 624 #endif
592 625 #if stream-bundle2
593 626
594 627 Stream repository with obsolescence
595 628 -----------------------------------
596 629
597 630 Clone non-publishing with obsolescence
598 631
599 632 $ cat >> $HGRCPATH << EOF
600 633 > [experimental]
601 634 > evolution=all
602 635 > EOF
603 636
604 637 $ cd server
605 638 $ echo foo > foo
606 639 $ hg -q commit -m 'about to be pruned'
607 640 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
608 641 1 new obsolescence markers
609 642 obsoleted 1 changesets
610 643 $ hg up null -q
611 644 $ hg log -T '{rev}: {phase}\n'
612 645 1: draft
613 646 0: draft
614 647 $ hg serve -p $HGPORT -d --pid-file=hg.pid
615 648 $ cat hg.pid > $DAEMON_PIDS
616 649 $ cd ..
617 650
618 651 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
619 652 streaming all changes
620 653 1035 files to transfer, 97.1 KB of data (no-zstd !)
621 654 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
622 655 1035 files to transfer, 94.3 KB of data (zstd !)
623 656 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
624 657 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
625 658 1: draft
626 659 0: draft
627 660 $ hg debugobsolete -R with-obsolescence
628 661 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
629 662
630 663 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
631 664 streaming all changes
632 665 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
633 666 abort: pull failed on remote
634 667 [100]
635 668
636 669 $ killdaemons.py
637 670
638 671 #endif
General Comments 0
You need to be logged in to leave comments. Login now