##// END OF EJS Templates
streamclone: also stream caches to the client...
Boris Feld -
r35785:5f5fb279 default
parent child Browse files
Show More
@@ -11,10 +11,12 b' import contextlib'
11 import os
11 import os
12 import struct
12 import struct
13 import tempfile
13 import tempfile
14 import warnings
14
15
15 from .i18n import _
16 from .i18n import _
16 from . import (
17 from . import (
17 branchmap,
18 branchmap,
19 cacheutil,
18 error,
20 error,
19 phases,
21 phases,
20 store,
22 store,
@@ -435,6 +437,10 b' class streamcloneapplier(object):'
435 _fileappend = 0 # append only file
437 _fileappend = 0 # append only file
436 _filefull = 1 # full snapshot file
438 _filefull = 1 # full snapshot file
437
439
440 # Source of the file
441 _srcstore = 's' # store (svfs)
442 _srccache = 'c' # cache (cache)
443
438 # This is its own function so extensions can override it.
444 # This is its own function so extensions can override it.
439 def _walkstreamfullstorefiles(repo):
445 def _walkstreamfullstorefiles(repo):
440 """list snapshot file from the store"""
446 """list snapshot file from the store"""
@@ -443,12 +449,12 b' def _walkstreamfullstorefiles(repo):'
443 fnames.append('phaseroots')
449 fnames.append('phaseroots')
444 return fnames
450 return fnames
445
451
446 def _filterfull(entry, copy, vfs):
452 def _filterfull(entry, copy, vfsmap):
447 """actually copy the snapshot files"""
453 """actually copy the snapshot files"""
448 name, ftype, data = entry
454 src, name, ftype, data = entry
449 if ftype != _filefull:
455 if ftype != _filefull:
450 return entry
456 return entry
451 return (name, ftype, copy(vfs.join(name)))
457 return (src, name, ftype, copy(vfsmap[src].join(name)))
452
458
453 @contextlib.contextmanager
459 @contextlib.contextmanager
454 def maketempcopies():
460 def maketempcopies():
@@ -466,19 +472,33 b' def maketempcopies():'
466 for tmp in files:
472 for tmp in files:
467 util.tryunlink(tmp)
473 util.tryunlink(tmp)
468
474
475 def _makemap(repo):
476 """make a (src -> vfs) map for the repo"""
477 vfsmap = {
478 _srcstore: repo.svfs,
479 _srccache: repo.cachevfs,
480 }
481 # we keep repo.vfs out of the map on purpose, there are too many dangers there
482 # (eg: .hg/hgrc)
483 assert repo.vfs not in vfsmap.values()
484
485 return vfsmap
486
469 def _emit(repo, entries, totalfilesize):
487 def _emit(repo, entries, totalfilesize):
470 """actually emit the stream bundle"""
488 """actually emit the stream bundle"""
471 vfs = repo.svfs
489 vfsmap = _makemap(repo)
472 progress = repo.ui.progress
490 progress = repo.ui.progress
473 progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
491 progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
474 with maketempcopies() as copy:
492 with maketempcopies() as copy:
475 try:
493 try:
476 # copy is delayed until we are in the try
494 # copy is delayed until we are in the try
477 entries = [_filterfull(e, copy, vfs) for e in entries]
495 entries = [_filterfull(e, copy, vfsmap) for e in entries]
478 yield None # this release the lock on the repository
496 yield None # this release the lock on the repository
479 seen = 0
497 seen = 0
480
498
481 for name, ftype, data in entries:
499 for src, name, ftype, data in entries:
500 vfs = vfsmap[src]
501 yield src
482 yield util.uvarintencode(len(name))
502 yield util.uvarintencode(len(name))
483 if ftype == _fileappend:
503 if ftype == _fileappend:
484 fp = vfs(name)
504 fp = vfs(name)
@@ -507,10 +527,11 b' def generatev2(repo):'
507 """Emit content for version 2 of a streaming clone.
527 """Emit content for version 2 of a streaming clone.
508
528
509 the data stream consists the following entries:
529 the data stream consists the following entries:
510 1) A varint containing the length of the filename
530 1) A char representing the file destination (eg: store or cache)
511 2) A varint containing the length of file data
531 2) A varint containing the length of the filename
512 3) N bytes containing the filename (the internal, store-agnostic form)
532 3) A varint containing the length of file data
513 4) N bytes containing the file data
533 4) N bytes containing the filename (the internal, store-agnostic form)
534 5) N bytes containing the file data
514
535
515 Returns a 3-tuple of (file count, file size, data iterator).
536 Returns a 3-tuple of (file count, file size, data iterator).
516 """
537 """
@@ -523,12 +544,16 b' def generatev2(repo):'
523 repo.ui.debug('scanning\n')
544 repo.ui.debug('scanning\n')
524 for name, ename, size in _walkstreamfiles(repo):
545 for name, ename, size in _walkstreamfiles(repo):
525 if size:
546 if size:
526 entries.append((name, _fileappend, size))
547 entries.append((_srcstore, name, _fileappend, size))
527 totalfilesize += size
548 totalfilesize += size
528 for name in _walkstreamfullstorefiles(repo):
549 for name in _walkstreamfullstorefiles(repo):
529 if repo.svfs.exists(name):
550 if repo.svfs.exists(name):
530 totalfilesize += repo.svfs.lstat(name).st_size
551 totalfilesize += repo.svfs.lstat(name).st_size
531 entries.append((name, _filefull, None))
552 entries.append((_srcstore, name, _filefull, None))
553 for name in cacheutil.cachetocopy(repo):
554 if repo.cachevfs.exists(name):
555 totalfilesize += repo.cachevfs.lstat(name).st_size
556 entries.append((_srccache, name, _filefull, None))
532
557
533 chunks = _emit(repo, entries, totalfilesize)
558 chunks = _emit(repo, entries, totalfilesize)
534 first = next(chunks)
559 first = next(chunks)
@@ -536,6 +561,16 b' def generatev2(repo):'
536
561
537 return len(entries), totalfilesize, chunks
562 return len(entries), totalfilesize, chunks
538
563
564 @contextlib.contextmanager
565 def nested(*ctxs):
566 with warnings.catch_warnings():
567 # For some reason, Python decided 'nested' was deprecated without
568 # replacement. They officially advise filtering the deprecation
569 # warning for people who actually need the feature.
570 warnings.filterwarnings("ignore",category=DeprecationWarning)
571 with contextlib.nested(*ctxs):
572 yield
573
539 def consumev2(repo, fp, filecount, filesize):
574 def consumev2(repo, fp, filecount, filesize):
540 """Apply the contents from a version 2 streaming clone.
575 """Apply the contents from a version 2 streaming clone.
541
576
@@ -552,19 +587,23 b' def consumev2(repo, fp, filecount, files'
552
587
553 progress(_('clone'), handledbytes, total=filesize, unit=_('bytes'))
588 progress(_('clone'), handledbytes, total=filesize, unit=_('bytes'))
554
589
555 vfs = repo.svfs
590 vfsmap = _makemap(repo)
556
591
557 with repo.transaction('clone'):
592 with repo.transaction('clone'):
558 with vfs.backgroundclosing(repo.ui):
593 ctxs = (vfs.backgroundclosing(repo.ui)
594 for vfs in vfsmap.values())
595 with nested(*ctxs):
559 for i in range(filecount):
596 for i in range(filecount):
597 src = fp.read(1)
598 vfs = vfsmap[src]
560 namelen = util.uvarintdecodestream(fp)
599 namelen = util.uvarintdecodestream(fp)
561 datalen = util.uvarintdecodestream(fp)
600 datalen = util.uvarintdecodestream(fp)
562
601
563 name = fp.read(namelen)
602 name = fp.read(namelen)
564
603
565 if repo.ui.debugflag:
604 if repo.ui.debugflag:
566 repo.ui.debug('adding %s (%s)\n' %
605 repo.ui.debug('adding [%s] %s (%s)\n' %
567 (name, util.bytecount(datalen)))
606 (src, name, util.bytecount(datalen)))
568
607
569 with vfs(name, 'w') as ofp:
608 with vfs(name, 'w') as ofp:
570 for chunk in util.filechunkiter(fp, limit=datalen):
609 for chunk in util.filechunkiter(fp, limit=datalen):
@@ -38,8 +38,13 b' Basic clone'
38 #if stream-bundle2
38 #if stream-bundle2
39 $ hg clone --stream -U http://localhost:$HGPORT clone1
39 $ hg clone --stream -U http://localhost:$HGPORT clone1
40 streaming all changes
40 streaming all changes
41 1027 files to transfer, 96.3 KB of data
41 1030 files to transfer, 96.4 KB of data
42 transferred 96.3 KB in * seconds (* */sec) (glob)
42 transferred 96.4 KB in * seconds (* */sec) (glob)
43
44 $ ls -1 clone1/.hg/cache
45 branch2-served
46 rbc-names-v1
47 rbc-revs-v1
43 #endif
48 #endif
44
49
45 --uncompressed is an alias to --stream
50 --uncompressed is an alias to --stream
@@ -55,8 +60,8 b' Basic clone'
55 #if stream-bundle2
60 #if stream-bundle2
56 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
61 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
57 streaming all changes
62 streaming all changes
58 1027 files to transfer, 96.3 KB of data
63 1030 files to transfer, 96.4 KB of data
59 transferred 96.3 KB in * seconds (* */sec) (glob)
64 transferred 96.4 KB in * seconds (* */sec) (glob)
60 #endif
65 #endif
61
66
62 Clone with background file closing enabled
67 Clone with background file closing enabled
@@ -95,10 +100,11 b' Clone with background file closing enabl'
95 bundle2-input-bundle: with-transaction
100 bundle2-input-bundle: with-transaction
96 bundle2-input-part: "stream" (params: 4 mandatory) supported
101 bundle2-input-part: "stream" (params: 4 mandatory) supported
97 applying stream bundle
102 applying stream bundle
98 1027 files to transfer, 96.3 KB of data
103 1030 files to transfer, 96.4 KB of data
104 starting 4 threads for background file closing
99 starting 4 threads for background file closing
105 starting 4 threads for background file closing
100 transferred 96.3 KB in * seconds (* */sec) (glob)
106 transferred 96.4 KB in * seconds (* */sec) (glob)
101 bundle2-input-part: total payload size 110887
107 bundle2-input-part: total payload size 112077
102 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
108 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
103 bundle2-input-bundle: 1 parts total
109 bundle2-input-bundle: 1 parts total
104 checking for updated bookmarks
110 checking for updated bookmarks
@@ -136,8 +142,8 b' Streaming of secrets can be overridden b'
136 #if stream-bundle2
142 #if stream-bundle2
137 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
143 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
138 streaming all changes
144 streaming all changes
139 1027 files to transfer, 96.3 KB of data
145 1030 files to transfer, 96.4 KB of data
140 transferred 96.3 KB in * seconds (* */sec) (glob)
146 transferred 96.4 KB in * seconds (* */sec) (glob)
141 #endif
147 #endif
142
148
143 $ killdaemons.py
149 $ killdaemons.py
@@ -253,8 +259,8 b' clone it'
253 #if stream-bundle2
259 #if stream-bundle2
254 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
260 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
255 streaming all changes
261 streaming all changes
256 1027 files to transfer, 96.3 KB of data
262 1033 files to transfer, 96.6 KB of data
257 transferred 96.3 KB in * seconds (* */sec) (glob)
263 transferred 96.6 KB in * seconds (* */sec) (glob)
258 updating to branch default
264 updating to branch default
259 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
260 #endif
266 #endif
@@ -283,8 +289,8 b' Clone as publishing'
283 #if stream-bundle2
289 #if stream-bundle2
284 $ hg clone --stream http://localhost:$HGPORT phase-publish
290 $ hg clone --stream http://localhost:$HGPORT phase-publish
285 streaming all changes
291 streaming all changes
286 1027 files to transfer, 96.3 KB of data
292 1033 files to transfer, 96.6 KB of data
287 transferred 96.3 KB in * seconds (* */sec) (glob)
293 transferred 96.6 KB in * seconds (* */sec) (glob)
288 updating to branch default
294 updating to branch default
289 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
295 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
290 #endif
296 #endif
@@ -318,8 +324,8 b' Clone as non publishing'
318 #if stream-bundle2
324 #if stream-bundle2
319 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
325 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
320 streaming all changes
326 streaming all changes
321 1028 files to transfer, 96.4 KB of data
327 1034 files to transfer, 96.7 KB of data
322 transferred 96.4 KB in * seconds (* */sec) (glob)
328 transferred 96.7 KB in * seconds (* */sec) (glob)
323 updating to branch default
329 updating to branch default
324 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
330 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
325 $ hg -R phase-no-publish phase -r 'all()'
331 $ hg -R phase-no-publish phase -r 'all()'
General Comments 0
You need to be logged in to leave comments. Login now