stream-clone: make it the responsibility of the store entry to stream content...
marmoute
r51532:5e60abf8 default
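What changes: instead of _emit2 opening each file and slicing it into chunks itself, every store entry now hands back ready-made (unencoded_file_path, content_iterator, content_size) triples via get_streams(). A minimal sketch of a consumer of the new API follows; the entry, vfs and copy arguments are placeholders, not part of this patch:

    def consume(entry, vfs, copy):
        # Placeholder consumer: walk every stream the entry exposes and
        # check that each one delivers exactly the advertised size.
        total = 0
        for name, stream, size in entry.get_streams(vfs, copies=copy):
            read = sum(len(chunk) for chunk in stream)
            assert read == size, (name, read, size)
            total += read
        return total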
@@ -469,12 +469,36 @@ class StoreFile:
 
     def file_size(self, vfs):
         if self._file_size is None:
+            if vfs is None:
+                msg = b"calling vfs-less file_size without prior call: %s"
+                msg %= self.unencoded_path
+                raise error.ProgrammingError(msg)
             try:
                 self._file_size = vfs.stat(self.unencoded_path).st_size
             except FileNotFoundError:
                 self._file_size = 0
         return self._file_size
 
+    def get_stream(self, vfs, copies):
+        """return data "stream" information for this file
+
+        (unencoded_file_path, content_iterator, content_size)
+        """
+        size = self.file_size(None)
+
+        def get_stream():
+            actual_path = copies[vfs.join(self.unencoded_path)]
+            with open(actual_path, 'rb') as fp:
+                yield None  # ready to stream
+                if size <= 65536:
+                    yield fp.read(size)
+                else:
+                    yield from util.filechunkiter(fp, limit=size)
+
+        s = get_stream()
+        next(s)
+        return (self.unencoded_path, s, size)
+
 
 @attr.s(slots=True, init=False)
 class BaseStoreEntry:
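Note on the pattern above: the inner get_stream() generator yields a sentinel None right after opening the file, and the outer method immediately calls next(s) to consume it. The file is therefore opened, and any missing-file error surfaces, while the streams are being assembled, yet the content itself is still read lazily when the caller iterates. The same priming trick in isolation, with invented names (primed_reader is not part of the patch):

    def primed_reader(path, size, chunk_size=65536):
        # Build a generator, then advance it once so the file is opened
        # (and errors surface) before the reader is handed out.
        def gen():
            with open(path, 'rb') as fp:
                yield None  # file is open: "ready to stream"
                remaining = size
                while remaining > 0:
                    data = fp.read(min(chunk_size, remaining))
                    if not data:
                        break
                    remaining -= len(data)
                    yield data

        s = gen()
        next(s)  # prime: open the file now, stream content later
        return s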
@@ -485,6 +509,14 @@ class BaseStoreEntry:
     def files(self) -> List[StoreFile]:
         raise NotImplementedError
 
+    def get_streams(self, vfs, copies=None):
+        """return a list of data stream associated to files for this entry
+
+        return [(unencoded_file_path, content_iterator, content_size), …]
+        """
+        assert vfs is not None
+        return [f.get_stream(vfs, copies) for f in self.files()]
+
 
 @attr.s(slots=True, init=False)
 class SimpleStoreEntry(BaseStoreEntry):
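For orientation, the copies argument threaded through here is a mapping keyed by vfs.join(name); it resolves a store path to the actual file that should be read (typically either the original file or a temporary copy made for volatile files). A toy illustration of the shape get_stream() expects, with an invented FakeVfs standing in for a real Mercurial vfs:

    import os

    class FakeVfs:
        # Invented stand-in exposing only the join() used by get_stream().
        def __init__(self, base):
            self.base = base

        def join(self, path):
            return os.path.join(self.base, os.fsdecode(path))

    vfs = FakeVfs('/repo/.hg/store')
    name = b'data/foo.i'
    # copies maps the joined store path to the real file to open
    copies = {vfs.join(name): '/tmp/clone-snapshot/data/foo.i'}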
@@ -11,7 +11,6 @@ import os
 import struct
 
 from .i18n import _
-from .pycompat import open
 from .interfaces import repository
 from . import (
     bookmarks,
@@ -658,36 +657,25 @@ def _emit2(repo, entries):
     totalbytecount = 0
 
     for src, vfs, e in entries:
-        for f in e.files():
+        for name, stream, size in e.get_streams(vfs, copies=copy):
             yield src
-            name = f.unencoded_path
             yield util.uvarintencode(len(name))
-            actual_path = copy[vfs.join(name)]
-            fp = open(actual_path, b'rb')
-            size = f.file_size(vfs)
+            yield util.uvarintencode(size)
+            yield name
             bytecount = 0
-            try:
-                yield util.uvarintencode(size)
-                yield name
-                if size <= 65536:
-                    chunks = (fp.read(size),)
-                else:
-                    chunks = util.filechunkiter(fp, limit=size)
-                for chunk in chunks:
-                    bytecount += len(chunk)
-                    totalbytecount += len(chunk)
-                    progress.update(totalbytecount)
-                    yield chunk
-                if bytecount != size:
-                    # Would most likely be caused by a race due to `hg
-                    # strip` or a revlog split
-                    msg = _(
-                        b'clone could only read %d bytes from %s, but '
-                        b'expected %d bytes'
-                    )
-                    raise error.Abort(msg % (bytecount, name, size))
-            finally:
-                fp.close()
+            for chunk in stream:
+                bytecount += len(chunk)
+                totalbytecount += len(chunk)
+                progress.update(totalbytecount)
+                yield chunk
+            if bytecount != size:
+                # Would most likely be caused by a race due to `hg
+                # strip` or a revlog split
+                msg = _(
+                    b'clone could only read %d bytes from %s, but '
+                    b'expected %d bytes'
+                )
+                raise error.Abort(msg % (bytecount, name, size))
 
 
 def _test_sync_point_walk_1(repo):
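In terms of the emitted byte stream, each file is now framed as: the source marker, a uvarint of the name length, a uvarint of the content size, the name itself, and then the content chunks, with the byte count checked against the advertised size afterwards. A sketch of that framing follows; it uses a local LEB128-style helper on the assumption that it matches util.uvarintencode, and frame() is illustrative, not part of the patch:

    def uvarint(value):
        # Unsigned little-endian base-128 varint (assumed to match
        # Mercurial's util.uvarintencode).
        out = bytearray()
        while True:
            byte = value & 0x7F
            value >>= 7
            if value:
                out.append(byte | 0x80)
            else:
                out.append(byte)
                return bytes(out)

    def frame(src, name, stream, size):
        # Framing for one file, mirroring the order used in _emit2.
        yield src                  # source marker for this entry
        yield uvarint(len(name))   # length of the file name
        yield uvarint(size)        # expected content size
        yield name                 # the store path itself
        for chunk in stream:       # then the raw content
            yield chunk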