@@ -620,7 +620,7 @@ def _makemap(repo):
     return vfsmap
 
 
-def _emit2(repo, entries, totalfilesize):
+def _emit2(repo, entries):
     """actually emit the stream bundle"""
     vfsmap = _makemap(repo)
     # we keep repo.vfs out of the on purpose, ther are too many danger there
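The signature change above drops the precomputed totalfilesize argument: _emit2 now derives both the file count and the total byte size from the entry objects it receives and, as the next hunk shows, hands those totals back to its caller as the generator's first item. A minimal, self-contained sketch of that pattern, using made-up names (emit, entries) rather than the real Mercurial API:

    def emit(entries):
        """Yield (count, total_size) first, then the payload chunks."""
        # pre-scan to compute the totals before any data is produced
        file_count = total_size = 0
        for name, data in entries:
            file_count += 1
            total_size += len(data)
        # first yield: the totals (in _emit2 this is also the point at which
        # the caller may release the repository lock)
        yield file_count, total_size
        # subsequent yields: the actual data
        for name, data in entries:
            yield data

    chunks = emit([(b'a', b'xxx'), (b'b', b'yyyy')])
    count, size = next(chunks)     # -> (2, 7)
    payload = b''.join(chunks)     # -> b'xxxyyyy'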
@@ -633,27 +633,38 @@ def _emit2(repo, entries, totalfilesize):
             b'repo.vfs must not be added to vfsmap for security reasons'
         )
 
+    # translate the vfs one
+    entries = [(vfs_key, vfsmap[vfs_key], e) for (vfs_key, e) in entries]
+
+    file_count = totalfilesize = 0
+    # record the expected size of every file
+    for k, vfs, e in entries:
+        for f in e.files():
+            file_count += 1
+            totalfilesize += f.file_size(vfs)
+
     progress = repo.ui.makeprogress(
         _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
     )
     progress.update(0)
     with TempCopyManager() as copy, progress:
-        # copy is delayed until we are in the try
-        entries = [_filterfull(e, copy, vfsmap) for e in entries]
-        yield None  # this release the lock on the repository
+        # create a copy of volatile files
+        for k, vfs, e in entries:
+            for f in e.files():
+                if f.is_volatile:
+                    copy(vfs.join(f.unencoded_path))
+        # the first yield release the lock on the repository
+        yield file_count, totalfilesize
         totalbytecount = 0
 
-        for src, name, ftype, data in entries:
-
-            vfs = vfsmap[src]
+        for src, vfs, e in entries:
+            for f in e.files():
                 yield src
+                name = f.unencoded_path
                 yield util.uvarintencode(len(name))
-            if ftype == _fileappend:
-                fp = vfs(name)
-                size = data
-            elif ftype == _filefull:
-                fp = open(data, b'rb')
-                size = util.fstat(fp).st_size
+                actual_path = copy[vfs.join(name)]
+                fp = open(actual_path, b'rb')
+                size = f.file_size(vfs)
                 bytecount = 0
                 try:
                     yield util.uvarintencode(size)
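Two things change inside the `with TempCopyManager() as copy` block: volatile files (files that may change while the stream is being produced) are now copied aside explicitly while the lock is still held, and every file is later opened through copy[vfs.join(name)], which, judging by how it is used for non-volatile files in this hunk, must fall back to the original path for files that were never copied. A rough sketch of an object with those two operations, as an illustration only (this is not Mercurial's actual TempCopyManager):

    import os
    import shutil
    import tempfile

    class TempCopySketch:
        """Copy selected files aside and map original path -> readable path."""

        def __enter__(self):
            self._dir = tempfile.mkdtemp()
            self._copies = {}
            return self

        def __call__(self, path):
            # snapshot a volatile file while the repository lock is still held
            dst = os.path.join(self._dir, '%d' % len(self._copies))
            shutil.copyfile(path, dst)
            self._copies[path] = dst
            return dst

        def __getitem__(self, path):
            # assumption: files that were never copied are read in place
            return self._copies.get(path, path)

        def __exit__(self, *exc):
            shutil.rmtree(self._dir, ignore_errors=True)

Copying the volatile files before the first yield keeps the streamed data consistent even though the actual transfer happens after the repository lock has been released.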
@@ -768,20 +779,20 @@ def generatev2(repo, includes, excludes, includeobsmarkers):
 
     repo.ui.debug(b'scanning\n')
 
-    entries, totalfilesize = _v2_walk(
+    entries = _entries_walk(
         repo,
         includes=includes,
         excludes=excludes,
         includeobsmarkers=includeobsmarkers,
     )
 
-    chunks = _emit2(repo, entries, totalfilesize)
+    chunks = _emit2(repo, entries)
     first = next(chunks)
-    assert first is None
+    file_count, total_file_size = first
     _test_sync_point_walk_1(repo)
     _test_sync_point_walk_2(repo)
 
-    return entries, totalfilesize, chunks
+    return file_count, total_file_size, chunks
 
 
 def generatev3(repo, includes, excludes, includeobsmarkers):
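On the caller side, generatev2 now unpacks the totals from the generator's first item instead of taking them from the walk, so the walk output no longer has to be materialized up front just to count files and bytes. A hypothetical usage sketch (send() is a made-up transport function and the argument values are purely illustrative):

    file_count, total_size, chunks = generatev2(
        repo,
        includes=None,
        excludes=None,
        includeobsmarkers=True,
    )
    # the totals are available before any file data has been produced
    print(file_count, total_size)
    for chunk in chunks:
        send(chunk)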