##// END OF EJS Templates
streamclone: also stream caches to the client...
Boris Feld -
r35785:5f5fb279 default
parent child Browse files
Show More
@@ -11,10 +11,12 b' import contextlib'
11 11 import os
12 12 import struct
13 13 import tempfile
14 import warnings
14 15
15 16 from .i18n import _
16 17 from . import (
17 18 branchmap,
19 cacheutil,
18 20 error,
19 21 phases,
20 22 store,
@@ -435,6 +437,10 b' class streamcloneapplier(object):'
435 437 _fileappend = 0 # append only file
436 438 _filefull = 1 # full snapshot file
437 439
440 # Source of the file
441 _srcstore = 's' # store (svfs)
442 _srccache = 'c' # cache (cache)
443
438 444 # This is its own function so extensions can override it.
439 445 def _walkstreamfullstorefiles(repo):
440 446 """list snapshot file from the store"""
@@ -443,12 +449,12 b' def _walkstreamfullstorefiles(repo):'
443 449 fnames.append('phaseroots')
444 450 return fnames
445 451
446 def _filterfull(entry, copy, vfs):
452 def _filterfull(entry, copy, vfsmap):
447 453 """actually copy the snapshot files"""
448 name, ftype, data = entry
454 src, name, ftype, data = entry
449 455 if ftype != _filefull:
450 456 return entry
451 return (name, ftype, copy(vfs.join(name)))
457 return (src, name, ftype, copy(vfsmap[src].join(name)))
452 458
453 459 @contextlib.contextmanager
454 460 def maketempcopies():
@@ -466,19 +472,33 b' def maketempcopies():'
466 472 for tmp in files:
467 473 util.tryunlink(tmp)
468 474
475 def _makemap(repo):
476 """make a (src -> vfs) map for the repo"""
477 vfsmap = {
478 _srcstore: repo.svfs,
479 _srccache: repo.cachevfs,
480 }
481 # we keep repo.vfs out of the map on purpose, there are too many dangers there
482 # (eg: .hg/hgrc)
483 assert repo.vfs not in vfsmap.values()
484
485 return vfsmap
486
469 487 def _emit(repo, entries, totalfilesize):
470 488 """actually emit the stream bundle"""
471 vfs = repo.svfs
489 vfsmap = _makemap(repo)
472 490 progress = repo.ui.progress
473 491 progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
474 492 with maketempcopies() as copy:
475 493 try:
476 494 # copy is delayed until we are in the try
477 entries = [_filterfull(e, copy, vfs) for e in entries]
495 entries = [_filterfull(e, copy, vfsmap) for e in entries]
478 496 yield None # this release the lock on the repository
479 497 seen = 0
480 498
481 for name, ftype, data in entries:
499 for src, name, ftype, data in entries:
500 vfs = vfsmap[src]
501 yield src
482 502 yield util.uvarintencode(len(name))
483 503 if ftype == _fileappend:
484 504 fp = vfs(name)
@@ -507,10 +527,11 b' def generatev2(repo):'
507 527 """Emit content for version 2 of a streaming clone.
508 528
509 529 the data stream consists the following entries:
510 1) A varint containing the length of the filename
511 2) A varint containing the length of file data
512 3) N bytes containing the filename (the internal, store-agnostic form)
513 4) N bytes containing the file data
530 1) A char representing the file destination (eg: store or cache)
531 2) A varint containing the length of the filename
532 3) A varint containing the length of file data
533 4) N bytes containing the filename (the internal, store-agnostic form)
534 5) N bytes containing the file data
514 535
515 536 Returns a 3-tuple of (file count, file size, data iterator).
516 537 """
@@ -523,12 +544,16 b' def generatev2(repo):'
523 544 repo.ui.debug('scanning\n')
524 545 for name, ename, size in _walkstreamfiles(repo):
525 546 if size:
526 entries.append((name, _fileappend, size))
547 entries.append((_srcstore, name, _fileappend, size))
527 548 totalfilesize += size
528 549 for name in _walkstreamfullstorefiles(repo):
529 550 if repo.svfs.exists(name):
530 551 totalfilesize += repo.svfs.lstat(name).st_size
531 entries.append((name, _filefull, None))
552 entries.append((_srcstore, name, _filefull, None))
553 for name in cacheutil.cachetocopy(repo):
554 if repo.cachevfs.exists(name):
555 totalfilesize += repo.cachevfs.lstat(name).st_size
556 entries.append((_srccache, name, _filefull, None))
532 557
533 558 chunks = _emit(repo, entries, totalfilesize)
534 559 first = next(chunks)
@@ -536,6 +561,16 b' def generatev2(repo):'
536 561
537 562 return len(entries), totalfilesize, chunks
538 563
564 @contextlib.contextmanager
565 def nested(*ctxs):
566 with warnings.catch_warnings():
567 # For some reason, Python decided 'nested' was deprecated without
567 # replacement. They officially advised filtering the deprecation
569 # warning for people who actually need the feature.
570 warnings.filterwarnings("ignore",category=DeprecationWarning)
571 with contextlib.nested(*ctxs):
572 yield
573
539 574 def consumev2(repo, fp, filecount, filesize):
540 575 """Apply the contents from a version 2 streaming clone.
541 576
@@ -552,19 +587,23 b' def consumev2(repo, fp, filecount, files'
552 587
553 588 progress(_('clone'), handledbytes, total=filesize, unit=_('bytes'))
554 589
555 vfs = repo.svfs
590 vfsmap = _makemap(repo)
556 591
557 592 with repo.transaction('clone'):
558 with vfs.backgroundclosing(repo.ui):
593 ctxs = (vfs.backgroundclosing(repo.ui)
594 for vfs in vfsmap.values())
595 with nested(*ctxs):
559 596 for i in range(filecount):
597 src = fp.read(1)
598 vfs = vfsmap[src]
560 599 namelen = util.uvarintdecodestream(fp)
561 600 datalen = util.uvarintdecodestream(fp)
562 601
563 602 name = fp.read(namelen)
564 603
565 604 if repo.ui.debugflag:
566 repo.ui.debug('adding %s (%s)\n' %
567 (name, util.bytecount(datalen)))
605 repo.ui.debug('adding [%s] %s (%s)\n' %
606 (src, name, util.bytecount(datalen)))
568 607
569 608 with vfs(name, 'w') as ofp:
570 609 for chunk in util.filechunkiter(fp, limit=datalen):
@@ -38,8 +38,13 b' Basic clone'
38 38 #if stream-bundle2
39 39 $ hg clone --stream -U http://localhost:$HGPORT clone1
40 40 streaming all changes
41 1027 files to transfer, 96.3 KB of data
42 transferred 96.3 KB in * seconds (* */sec) (glob)
41 1030 files to transfer, 96.4 KB of data
42 transferred 96.4 KB in * seconds (* */sec) (glob)
43
44 $ ls -1 clone1/.hg/cache
45 branch2-served
46 rbc-names-v1
47 rbc-revs-v1
43 48 #endif
44 49
45 50 --uncompressed is an alias to --stream
@@ -55,8 +60,8 b' Basic clone'
55 60 #if stream-bundle2
56 61 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
57 62 streaming all changes
58 1027 files to transfer, 96.3 KB of data
59 transferred 96.3 KB in * seconds (* */sec) (glob)
63 1030 files to transfer, 96.4 KB of data
64 transferred 96.4 KB in * seconds (* */sec) (glob)
60 65 #endif
61 66
62 67 Clone with background file closing enabled
@@ -95,10 +100,11 b' Clone with background file closing enabl'
95 100 bundle2-input-bundle: with-transaction
96 101 bundle2-input-part: "stream" (params: 4 mandatory) supported
97 102 applying stream bundle
98 1027 files to transfer, 96.3 KB of data
103 1030 files to transfer, 96.4 KB of data
104 starting 4 threads for background file closing
99 105 starting 4 threads for background file closing
100 transferred 96.3 KB in * seconds (* */sec) (glob)
101 bundle2-input-part: total payload size 110887
106 transferred 96.4 KB in * seconds (* */sec) (glob)
107 bundle2-input-part: total payload size 112077
102 108 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
103 109 bundle2-input-bundle: 1 parts total
104 110 checking for updated bookmarks
@@ -136,8 +142,8 b' Streaming of secrets can be overridden b'
136 142 #if stream-bundle2
137 143 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
138 144 streaming all changes
139 1027 files to transfer, 96.3 KB of data
140 transferred 96.3 KB in * seconds (* */sec) (glob)
145 1030 files to transfer, 96.4 KB of data
146 transferred 96.4 KB in * seconds (* */sec) (glob)
141 147 #endif
142 148
143 149 $ killdaemons.py
@@ -253,8 +259,8 b' clone it'
253 259 #if stream-bundle2
254 260 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
255 261 streaming all changes
256 1027 files to transfer, 96.3 KB of data
257 transferred 96.3 KB in * seconds (* */sec) (glob)
262 1033 files to transfer, 96.6 KB of data
263 transferred 96.6 KB in * seconds (* */sec) (glob)
258 264 updating to branch default
259 265 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
260 266 #endif
@@ -283,8 +289,8 b' Clone as publishing'
283 289 #if stream-bundle2
284 290 $ hg clone --stream http://localhost:$HGPORT phase-publish
285 291 streaming all changes
286 1027 files to transfer, 96.3 KB of data
287 transferred 96.3 KB in * seconds (* */sec) (glob)
292 1033 files to transfer, 96.6 KB of data
293 transferred 96.6 KB in * seconds (* */sec) (glob)
288 294 updating to branch default
289 295 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
290 296 #endif
@@ -318,8 +324,8 b' Clone as non publishing'
318 324 #if stream-bundle2
319 325 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
320 326 streaming all changes
321 1028 files to transfer, 96.4 KB of data
322 transferred 96.4 KB in * seconds (* */sec) (glob)
327 1034 files to transfer, 96.7 KB of data
328 transferred 96.7 KB in * seconds (* */sec) (glob)
323 329 updating to branch default
324 330 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
325 331 $ hg -R phase-no-publish phase -r 'all()'
General Comments 0
You need to be logged in to leave comments. Login now