@@ -145,7 +145,9 @@ def onetimesetup(ui):
     )

    # don't clone filelogs to shallow clients
-    def _walkstreamfiles(orig, repo, matcher=None, phase=False):
+    def _walkstreamfiles(
+        orig, repo, matcher=None, phase=False, obsolescence=False
+    ):
         if state.shallowremote:
             # if we are shallow ourselves, stream our local commits
             if shallowutil.isenabled(repo):
@@ -200,7 +202,9 @@ def onetimesetup(ui):
                 _(b"Cannot clone from a shallow repo to a full repo.")
             )
         else:
-            for x in orig(repo, matcher, phase=phase):
+            for x in orig(
+                repo, matcher, phase=phase, obsolescence=obsolescence
+            ):
                 yield x

    extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)
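For readers unfamiliar with how remotefilelog hooks into the stream-clone walk above, here is a minimal, self-contained sketch of the wrapper-forwarding pattern this hunk updates. The wrapfunction helper and the stub module below are simplified stand-ins, not Mercurial's actual extensions.wrapfunction implementation; the point is that when the wrapped function grows a new keyword argument such as obsolescence, both the wrapper's signature and the orig(...) call must forward it, otherwise the argument is silently dropped.

    # Simplified stand-in for extensions.wrapfunction (hypothetical, for illustration).
    def wrapfunction(container, name, wrapper):
        orig = getattr(container, name)

        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)

        setattr(container, name, wrapped)


    # Stand-in for mercurial.streamclone with the new keyword argument.
    class streamclone_stub:
        @staticmethod
        def _walkstreamfiles(repo, matcher=None, phase=False, obsolescence=False):
            return [(b'store-entry', phase, obsolescence)]


    def _walkstreamfiles(orig, repo, matcher=None, phase=False, obsolescence=False):
        # the wrapper must accept *and* forward the new keyword, as in the hunk above
        for x in orig(repo, matcher, phase=phase, obsolescence=obsolescence):
            yield x


    wrapfunction(streamclone_stub, '_walkstreamfiles', _walkstreamfiles)
    print(list(streamclone_stub._walkstreamfiles(None, obsolescence=True)))
    # -> [(b'store-entry', False, True)]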
@@ -685,13 +685,22 @@ class basicstore:
                 details=file_details,
             )

-    def top_entries(self, phase=False) -> Generator[BaseStoreEntry, None, None]:
+    def top_entries(
+        self, phase=False, obsolescence=False
+    ) -> Generator[BaseStoreEntry, None, None]:
         if phase and self.vfs.exists(b'phaseroots'):
             yield SimpleStoreEntry(
                 entry_path=b'phaseroots',
                 is_volatile=True,
             )

+        if obsolescence and self.vfs.exists(b'obsstore'):
+            # XXX if we had the file size it could be non-volatile
+            yield SimpleStoreEntry(
+                entry_path=b'obsstore',
+                is_volatile=True,
+            )
+
         files = reversed(self._walk(b'', False))

         changelogs = collections.defaultdict(dict)
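With this hunk, top_entries reports the obsstore as a volatile SimpleStoreEntry alongside phaseroots, and callers opt in through the new keyword. A hedged usage sketch follows; it assumes Mercurial is importable and uses b'/path/to/repo' as a placeholder for an existing repository, and only relies on the entry/file attributes visible in this changeset (files(), unencoded_path, is_volatile).

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    for entry in repo.store.walk(phase=True, obsolescence=True):
        for f in entry.files():
            # volatile files (phaseroots, obsstore) may change while being streamed
            print(f.unencoded_path, f.is_volatile)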
@@ -733,7 +742,7 @@ class basicstore:
             )

     def walk(
-        self, matcher=None, phase=False
+        self, matcher=None, phase=False, obsolescence=False
     ) -> Generator[BaseStoreEntry, None, None]:
         """return files related to data storage (ie: revlogs)

@@ -745,7 +754,7 @@ class basicstore:
         # yield data files first
         for x in self.data_entries(matcher):
             yield x
-        for x in self.top_entries(phase=phase):
+        for x in self.top_entries(phase=phase, obsolescence=obsolescence):
             yield x

     def copylist(self):
@@ -241,8 +241,8 @@ def allowservergeneration(repo):


 # This is it's own function so extensions can override it.
-def _walkstreamfiles(repo, matcher=None, phase=False):
-    return repo.store.walk(matcher, phase=phase)
+def _walkstreamfiles(repo, matcher=None, phase=False, obsolescence=False):
+    return repo.store.walk(matcher, phase=phase, obsolescence=obsolescence)


 def generatev1(repo):
@@ -672,7 +672,7 @@ def _v2_walk(repo, includes, excludes, i
     - `size`: the size of the file (or None)
     """
     assert repo._currentlock(repo._lockref) is not None
-    entries = []
+    files = []
    totalfilesize = 0

    matcher = None
@@ -680,23 +680,23 @@ def _v2_walk(repo, includes, excludes, i
         matcher = narrowspec.match(repo.root, includes, excludes)

    phase = not repo.publishing()
-    for entry in _walkstreamfiles(repo, matcher, phase=phase):
+    entries = _walkstreamfiles(
+        repo, matcher, phase=phase, obsolescence=includeobsmarkers
+    )
+    for entry in entries:
         for f in entry.files():
             file_size = f.file_size(repo.store.vfs)
             if file_size:
                 ft = _fileappend
                 if f.is_volatile:
                     ft = _filefull
-                entries.append((_srcstore, f.unencoded_path, ft, file_size))
+                files.append((_srcstore, f.unencoded_path, ft, file_size))
                 totalfilesize += file_size
-    if includeobsmarkers and repo.svfs.exists(b'obsstore'):
-        totalfilesize += repo.svfs.lstat(b'obsstore').st_size
-        entries.append((_srcstore, b'obsstore', _filefull, None))
     for name in cacheutil.cachetocopy(repo):
         if repo.cachevfs.exists(name):
             totalfilesize += repo.cachevfs.lstat(name).st_size
-            entries.append((_srccache, name, _filefull, None))
-    return entries, totalfilesize
+            files.append((_srccache, name, _filefull, None))
+    return files, totalfilesize


 def generatev2(repo, includes, excludes, includeobsmarkers):
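The net effect of this last hunk is that the obsstore is no longer special-cased in _v2_walk: it now arrives as a volatile entry from top_entries and takes the same full-copy path as phaseroots. A self-contained sketch (hypothetical names, not the actual _v2_walk code) of the per-file decision that is now applied uniformly:

    # Non-volatile files (append-only revlogs) can be streamed as exactly
    # file_size bytes; volatile files such as 'obsstore' or 'phaseroots' are
    # copied in full because they may be rewritten while the stream is produced.
    FILE_APPEND = 'append'  # stream a fixed number of bytes
    FILE_FULL = 'full'      # snapshot the whole file under the repository lock


    def classify(walked_files):
        plan, total = [], 0
        for path, size, is_volatile in walked_files:
            if not size:
                continue
            kind = FILE_FULL if is_volatile else FILE_APPEND
            plan.append((path, kind, size))
            total += size
        return plan, total


    plan, total = classify(
        [
            (b'00changelog.i', 4096, False),
            (b'obsstore', 512, True),  # now reported by top_entries()
            (b'phaseroots', 64, True),
        ]
    )
    print(plan, total)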